git.saurik.com Git - apple/javascriptcore.git/commitdiff
JavaScriptCore-721.26.tar.gz ios-43 ios-431 ios-432 ios-433 v721.26
author Apple <opensource@apple.com>
Sat, 7 May 2011 18:58:22 +0000 (18:58 +0000)
committer Apple <opensource@apple.com>
Sat, 7 May 2011 18:58:22 +0000 (18:58 +0000)
44 files changed:
JavaScriptCore.exp
assembler/ARMv7Assembler.cpp [new file with mode: 0644]
assembler/ARMv7Assembler.h
assembler/AbstractMacroAssembler.h
assembler/LinkBuffer.h
assembler/MacroAssemblerARM.h
assembler/MacroAssemblerARMv7.h
assembler/MacroAssemblerX86Common.h
bytecompiler/BytecodeGenerator.h
bytecompiler/NodesCodegen.cpp
debugger/Debugger.cpp
jit/ExecutableAllocator.h
jit/ExecutableAllocatorFixedVMPool.cpp
jit/ExecutableAllocatorPosix.cpp
jit/ExecutableAllocatorWin.cpp
jit/JIT.cpp
jit/JIT.h
jit/JITOpcodes32_64.cpp
jit/JITPropertyAccess.cpp
jit/JITPropertyAccess32_64.cpp
jit/SpecializedThunkJIT.h
runtime/Collector.cpp
runtime/Collector.h
runtime/Executable.cpp
runtime/Executable.h
runtime/JSArray.cpp
runtime/JSGlobalData.cpp
runtime/JSGlobalData.h
runtime/JSGlobalObject.cpp
runtime/JSGlobalObject.h
runtime/JSString.cpp
runtime/JSString.h
runtime/JSValue.h
runtime/RegExpCache.cpp
runtime/RegExpCache.h
runtime/StringPrototype.cpp
runtime/UString.cpp
wtf/FastMalloc.cpp
wtf/Platform.h
wtf/text/CString.cpp
wtf/text/StringImpl.cpp
wtf/text/StringImpl.h
wtf/text/WTFString.cpp
yarr/RegexJIT.cpp

index 3f408c6cbf261fd9e33eff54a115b8b0b4e5d25b..6b5f9b7d506b23244e172b5ace267d7396313c64 100644 (file)
@@ -1,5 +1,3 @@
-__ZN7WebCore10StringImpl6createEPKcj
-__ZN7WebCore12AtomicString11addSlowCaseEPNS_10StringImplE
 _JSCheckScriptSyntax
 _JSClassCreate
 _JSClassRelease
@@ -90,7 +88,6 @@ _JSWeakObjectMapClear
 _JSWeakObjectMapCreate
 _JSWeakObjectMapGet
 _JSWeakObjectMapSet
-_WebCoreWebThreadIsLockedOrDisabled
 _WTFLog
 _WTFLogVerbose
 _WTFReportArgumentAssertionFailure
@@ -98,6 +95,7 @@ _WTFReportAssertionFailure
 _WTFReportAssertionFailureWithMessage
 _WTFReportError
 _WTFReportFatalError
+_WebCoreWebThreadIsLockedOrDisabled
 __Z12jsRegExpFreeP8JSRegExp
 __Z15jsRegExpCompilePKti24JSRegExpIgnoreCaseOption23JSRegExpMultilineOptionPjPPKc
 __Z15jsRegExpExecutePK8JSRegExpPKtiiPii
@@ -193,6 +191,7 @@ __ZN3JSC20MarkedArgumentBuffer10slowAppendENS_7JSValueE
 __ZN3JSC23AbstractSamplingCounter4dumpEv
 __ZN3JSC23objectProtoFuncToStringEPNS_9ExecStateEPNS_8JSObjectENS_7JSValueERKNS_7ArgListE
 __ZN3JSC23setUpStaticFunctionSlotEPNS_9ExecStateEPKNS_9HashEntryEPNS_8JSObjectERKNS_10IdentifierERNS_12PropertySlotE
+__ZN3JSC24DynamicGlobalObjectScopeC1EPNS_9ExecStateEPNS_14JSGlobalObjectE
 __ZN3JSC24createStackOverflowErrorEPNS_9ExecStateE
 __ZN3JSC25evaluateInGlobalCallFrameERKNS_7UStringERNS_7JSValueEPNS_14JSGlobalObjectE
 __ZN3JSC35createInterruptedExecutionExceptionEPNS_12JSGlobalDataE
@@ -393,6 +392,7 @@ __ZN7WebCore10StringImpl5lowerEv
 __ZN7WebCore10StringImpl5toIntEPb
 __ZN7WebCore10StringImpl5upperEv
 __ZN7WebCore10StringImpl6createEPKc
+__ZN7WebCore10StringImpl6createEPKcj
 __ZN7WebCore10StringImpl6createEPKtj
 __ZN7WebCore10StringImpl6createEPKtjN3WTF10PassRefPtrINS3_21CrossThreadRefCountedINS3_16OwnFastMallocPtrIS1_EEEEEE
 __ZN7WebCore10StringImpl6secureEtb
@@ -404,6 +404,7 @@ __ZN7WebCore10StringImpl8endsWithEPS0_b
 __ZN7WebCore10StringImpl9substringEjj
 __ZN7WebCore10StringImplD1Ev
 __ZN7WebCore11commentAtomE
+__ZN7WebCore12AtomicString11addSlowCaseEPNS_10StringImplE
 __ZN7WebCore12AtomicString3addEPKc
 __ZN7WebCore12AtomicString3addEPKt
 __ZN7WebCore12AtomicString3addEPKtj
@@ -546,3 +547,6 @@ __ZTVN3JSC8JSObjectE
 __ZTVN3JSC8JSStringE
 _jscore_fastmalloc_introspection
 _kJSClassDefinitionEmpty
+
+# iOS Methods
+__ZN3JSC12JSGlobalData20sharedInstanceExistsEv
diff --git a/assembler/ARMv7Assembler.cpp b/assembler/ARMv7Assembler.cpp
new file mode 100644 (file)
index 0000000..7aa1f10
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "ARMv7Assembler.h"
+
+namespace JSC {
+
+const int ARMv7Assembler::JumpSizes[] = { 0xffffffff, sizeof(uint16_t), sizeof(uint16_t),
+    2 * sizeof(uint16_t), 2 * sizeof(uint16_t), 3 * sizeof(uint16_t), 5 * sizeof(uint16_t), 6 * sizeof(uint16_t) };
+const int ARMv7Assembler::JumpPaddingSizes[] = { 0, 5 * sizeof(uint16_t), 6 * sizeof(uint16_t),
+    5 * sizeof(uint16_t), 6 * sizeof(uint16_t) };
+
+}
+
+#endif
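
The two tables above drive branch compaction: JumpPaddingSizes[jumpType] is the number of bytes the assembler reserves up front for a jump of a given JumpType, and JumpSizes[jumpLinkType] is what the encoding eventually chosen actually needs; jumpSizeDelta() in ARMv7Assembler.h simply subtracts the two. A minimal standalone sketch of that arithmetic using the table values above (the local names and main() wrapper are illustrative, not part of the commit):

    // Bytes reclaimed when a conditional jump, padded as JumpCondition
    // (6 halfwords: IT + MOVW + MOVT + BX), compacts to a 2-byte T1 branch.
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const int paddingForJumpCondition = 6 * sizeof(uint16_t);     // JumpPaddingSizes[JumpCondition]
        const int sizeOfLinkJumpT1 = sizeof(uint16_t);                // JumpSizes[LinkJumpT1]
        const int delta = paddingForJumpCondition - sizeOfLinkJumpT1; // what jumpSizeDelta() returns
        printf("compaction reclaims %d bytes\n", delta);              // prints 10
        return 0;
    }
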
index 2faa3a6102acc3197a9e2ad4e48f9a425607e01a..13ad3e0d3259d83e4147a139082734bc62b376f4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
  * Copyright (C) 2010 University of Szeged
  *
  * Redistribution and use in source and binary forms, with or without
@@ -304,7 +304,7 @@ public:
         }
 
         if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
-            encoding.immediate = bytes.byte0;
+            encoding.immediate = bytes.byte1;
             encoding.pattern = 2;
             return ARMThumbImmediate(TypeEncoded, encoding);
         }
@@ -440,12 +440,11 @@ private:
         };
         struct {
             unsigned type   : 2;
-            unsigned amount : 5;
+            unsigned amount : 6;
         };
     } m_u;
 };
 
-
 class ARMv7Assembler {
 public:
     ~ARMv7Assembler()
@@ -476,14 +475,45 @@ public:
         ConditionGT,
         ConditionLE,
         ConditionAL,
-
+        
         ConditionCS = ConditionHS,
         ConditionCC = ConditionLO,
     } Condition;
 
+    enum JumpType { JumpFixed, JumpNoCondition, JumpCondition, JumpNoConditionFixedSize, JumpConditionFixedSize, JumpTypeCount };
+    enum JumpLinkType { LinkInvalid, LinkJumpT1, LinkJumpT2, LinkJumpT3,
+        LinkJumpT4, LinkConditionalJumpT4, LinkBX, LinkConditionalBX, JumpLinkTypeCount };
+    static const int JumpSizes[JumpLinkTypeCount];
+    static const int JumpPaddingSizes[JumpTypeCount];
+    class LinkRecord {
+    public:
+        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+            : m_from(from)
+            , m_to(to)
+            , m_type(type)
+            , m_linkType(LinkInvalid)
+            , m_condition(condition)
+        {
+        }
+        intptr_t from() const { return m_from; }
+        void setFrom(intptr_t from) { m_from = from; }
+        intptr_t to() const { return m_to; }
+        JumpType type() const { return m_type; }
+        JumpLinkType linkType() const { return m_linkType; }
+        void setLinkType(JumpLinkType linkType) { ASSERT(m_linkType == LinkInvalid); m_linkType = linkType; }
+        Condition condition() const { return m_condition; }
+    private:
+        intptr_t m_from : 31;
+        intptr_t m_to : 31;
+        JumpType m_type : 3;
+        JumpLinkType m_linkType : 4;
+        Condition m_condition : 16;
+    };
+    
     class JmpSrc {
         friend class ARMv7Assembler;
         friend class ARMInstructionFormatter;
+        friend class LinkBuffer;
     public:
         JmpSrc()
             : m_offset(-1)
@@ -491,17 +521,32 @@ public:
         }
 
     private:
-        JmpSrc(int offset)
+        JmpSrc(int offset, JumpType type)
+            : m_offset(offset)
+            , m_condition(0xffff)
+            , m_type(type)
+        {
+            ASSERT(m_type == JumpFixed || m_type == JumpNoCondition || m_type == JumpNoConditionFixedSize);
+        }
+
+        JmpSrc(int offset, JumpType type, Condition condition)
             : m_offset(offset)
+            , m_condition(condition)
+            , m_type(type)
         {
+            ASSERT(m_type == JumpFixed || m_type == JumpCondition || m_type == JumpConditionFixedSize);
         }
 
         int m_offset;
+        Condition m_condition : 16;
+        JumpType m_type : 16;
+        
     };
     
     class JmpDst {
         friend class ARMv7Assembler;
         friend class ARMInstructionFormatter;
+        friend class LinkBuffer;
     public:
         JmpDst()
             : m_offset(-1)
@@ -525,17 +570,6 @@ public:
 
 private:
 
-    struct LinkRecord {
-        LinkRecord(intptr_t from, intptr_t to)
-            : from(from)
-            , to(to)
-        {
-        }
-
-        intptr_t from;
-        intptr_t to;
-    };
-
     // ARMv7, Appx-A.6.3
     bool BadReg(RegisterID reg)
     {
@@ -597,6 +631,8 @@ private:
     } OpcodeID;
 
     typedef enum {
+        OP_B_T1         = 0xD000,
+        OP_B_T2         = 0xE000,
         OP_AND_reg_T2   = 0xEA00,
         OP_TST_reg_T2   = 0xEA10,
         OP_ORR_reg_T2   = 0xEA40,
@@ -620,10 +656,11 @@ private:
         OP_VADD_T2      = 0xEE30,
         OP_VSUB_T2      = 0xEE30,
         OP_VDIV         = 0xEE80,
-        OP_VCMP_T1      = 0xEEB0,
+        OP_VCMP         = 0xEEB0,
         OP_VCVT_FPIVFP  = 0xEEB0,
         OP_VMOV_IMM_T2  = 0xEEB0,
         OP_VMRS         = 0xEEB0,
+        OP_B_T3a        = 0xF000,
         OP_B_T4a        = 0xF000,
         OP_AND_imm_T1   = 0xF000,
         OP_TST_imm      = 0xF010,
@@ -672,10 +709,11 @@ private:
         OP_VMOV_CtoSb   = 0x0A10,
         OP_VMOV_StoCb   = 0x0A10,
         OP_VMRSb        = 0x0A10,
-        OP_VCMP_T1b     = 0x0A40,
+        OP_VCMPb        = 0x0A40,
         OP_VCVT_FPIVFPb = 0x0A40,
         OP_VSUB_T2b     = 0x0A40,
         OP_NOP_T2b      = 0x8000,
+        OP_B_T3b        = 0x8000,
         OP_B_T4b        = 0x9000,
     } OpcodeID2;
 
@@ -712,7 +750,7 @@ private:
             | (ifThenElseConditionBit(condition, inst3if) << 2)
             | (ifThenElseConditionBit(condition, inst4if) << 1)
             | 1;
-        ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
         return (condition << 4) | mask;
     }
     uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
@@ -720,26 +758,25 @@ private:
         int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
             | (ifThenElseConditionBit(condition, inst3if) << 2)
             | 2;
-        ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
         return (condition << 4) | mask;
     }
     uint8_t ifThenElse(Condition condition, bool inst2if)
     {
         int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
             | 4;
-        ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
         return (condition << 4) | mask;
     }
 
     uint8_t ifThenElse(Condition condition)
     {
         int mask = 8;
-        ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
         return (condition << 4) | mask;
     }
 
 public:
-
+    
     void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
     {
         // Rd can only be SP if Rn is also SP.
@@ -878,27 +915,33 @@ public:
         ASSERT(!BadReg(rm));
         m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
     }
-
+    
     // Only allowed in IT (if then) block if last instruction.
-    JmpSrc b()
+    JmpSrc b(JumpType type)
     {
         m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
-        return JmpSrc(m_formatter.size());
+        return JmpSrc(m_formatter.size(), type);
     }
     
     // Only allowed in IT (if then) block if last instruction.
-    JmpSrc blx(RegisterID rm)
+    JmpSrc blx(RegisterID rm, JumpType type)
     {
         ASSERT(rm != ARMRegisters::pc);
         m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
-        return JmpSrc(m_formatter.size());
+        return JmpSrc(m_formatter.size(), type);
     }
 
     // Only allowed in IT (if then) block if last instruction.
-    JmpSrc bx(RegisterID rm)
+    JmpSrc bx(RegisterID rm, JumpType type, Condition condition)
+    {
+        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+        return JmpSrc(m_formatter.size(), type, condition);
+    }
+
+    JmpSrc bx(RegisterID rm, JumpType type)
     {
         m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
-        return JmpSrc(m_formatter.size());
+        return JmpSrc(m_formatter.size(), type);
     }
 
     void bkpt(uint8_t imm=0)
@@ -1513,7 +1556,12 @@ public:
 
     void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
     {
-        m_formatter.vfpOp(OP_VCMP_T1, OP_VCMP_T1b, true, VFPOperand(4), rd, rm);
+        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
+    }
+
+    void vcmpz_F64(FPDoubleRegisterID rd)
+    {
+        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
     }
 
     void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm)
@@ -1538,11 +1586,6 @@ public:
         m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
     }
 
-    void vmov_F64_0(FPDoubleRegisterID rd)
-    {
-        m_formatter.vfpOp(OP_VMOV_IMM_T2, OP_VMOV_IMM_T2b, true, VFPOperand(0), rd, VFPOperand(0));
-    }
-
     void vmov(RegisterID rd, FPSingleRegisterID rn)
     {
         ASSERT(!BadReg(rd));
@@ -1617,6 +1660,15 @@ public:
     {
         return dst.m_offset - src.m_offset;
     }
+
+    int executableOffsetFor(int location)
+    {
+        if (!location)
+            return 0;
+        return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
+    }
+    
+    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JumpPaddingSizes[jumpType] - JumpSizes[jumpLinkType]; }
     
     // Assembler admin methods:
 
@@ -1625,22 +1677,125 @@ public:
         return m_formatter.size();
     }
 
-    void* executableCopy(ExecutablePool* allocator)
+    static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
     {
-        void* copy = m_formatter.executableCopy(allocator);
+        return a.from() < b.from();
+    }
 
-        unsigned jumpCount = m_jumpsToLink.size();
-        for (unsigned i = 0; i < jumpCount; ++i) {
-            uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].from);
-            uint16_t* target = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].to);
-            linkJumpAbsolute(location, target);
+    bool canCompact(JumpType jumpType)
+    {
+        // The following cannot be compacted:
+        //   JumpFixed: represents custom jump sequence
+        //   JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
+        //   JumpConditionFixedSize: represents conditional jump that must remain a fixed size
+        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
+    }
+    
+    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+    {
+        if (jumpType == JumpFixed)
+            return LinkInvalid;
+        
+        // for patchable jump we must leave space for the longest code sequence
+        if (jumpType == JumpNoConditionFixedSize)
+            return LinkBX;
+        if (jumpType == JumpConditionFixedSize)
+            return LinkConditionalBX;
+        
+        const int paddingSize = JumpPaddingSizes[jumpType];
+        bool mayTriggerErrata = false;
+        
+        if (jumpType == JumpCondition) {
+            // 2-byte conditional T1
+            const uint16_t* jumpT1Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT1]));
+            if (canBeJumpT1(jumpT1Location, to))
+                return LinkJumpT1;
+            // 4-byte conditional T3
+            const uint16_t* jumpT3Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT3]));
+            if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) {
+                if (!mayTriggerErrata)
+                    return LinkJumpT3;
+            }
+            // 4-byte conditional T4 with IT
+            const uint16_t* conditionalJumpT4Location = 
+            reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkConditionalJumpT4]));
+            if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) {
+                if (!mayTriggerErrata)
+                    return LinkConditionalJumpT4;
+            }
+        } else {
+            // 2-byte unconditional T2
+            const uint16_t* jumpT2Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT2]));
+            if (canBeJumpT2(jumpT2Location, to))
+                return LinkJumpT2;
+            // 4-byte unconditional T4
+            const uint16_t* jumpT4Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT4]));
+            if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) {
+                if (!mayTriggerErrata)
+                    return LinkJumpT4;
+            }
+            // use long jump sequence
+            return LinkBX;
+        }
+        
+        ASSERT(jumpType == JumpCondition);
+        return LinkConditionalBX;
+    }
+    
+    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+    {
+        JumpLinkType linkType = computeJumpType(record.type(), from, to);
+        record.setLinkType(linkType);
+        return linkType;
+    }
+    
+    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+    {
+        int32_t ptr = regionStart / sizeof(int32_t);
+        const int32_t end = regionEnd / sizeof(int32_t);
+        int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
+        while (ptr < end)
+            offsets[ptr++] = offset;
+    }
+    
+    Vector<LinkRecord>& jumpsToLink()
+    {
+        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+        return m_jumpsToLink;
+    }
+
+    void link(LinkRecord& record, uint8_t* from, uint8_t* to)
+    {
+        switch (record.linkType()) {
+        case LinkJumpT1:
+            linkJumpT1(record.condition(), reinterpret_cast<uint16_t*>(from), to);
+            break;
+        case LinkJumpT2:
+            linkJumpT2(reinterpret_cast<uint16_t*>(from), to);
+            break;
+        case LinkJumpT3:
+            linkJumpT3(record.condition(), reinterpret_cast<uint16_t*>(from), to);
+            break;
+        case LinkJumpT4:
+            linkJumpT4(reinterpret_cast<uint16_t*>(from), to);
+            break;
+        case LinkConditionalJumpT4:
+            linkConditionalJumpT4(record.condition(), reinterpret_cast<uint16_t*>(from), to);
+            break;
+        case LinkConditionalBX:
+            linkConditionalBX(record.condition(), reinterpret_cast<uint16_t*>(from), to);
+            break;
+        case LinkBX:
+            linkBX(reinterpret_cast<uint16_t*>(from), to);
+            break;
+        default:
+            ASSERT_NOT_REACHED();
+            break;
         }
-        m_jumpsToLink.clear();
-
-        ASSERT(copy);
-        return copy;
     }
 
+    void* unlinkedCode() { return m_formatter.data(); }
+    
     static unsigned getCallReturnOffset(JmpSrc call)
     {
         ASSERT(call.m_offset >= 0);
@@ -1659,7 +1814,7 @@ public:
     {
         ASSERT(to.m_offset != -1);
         ASSERT(from.m_offset != -1);
-        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset));
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, from.m_type, from.m_condition));
     }
 
     static void linkJump(void* code, JmpSrc from, void* to)
@@ -1702,8 +1857,6 @@ public:
         ASSERT(reinterpret_cast<intptr_t>(to) & 1);
 
         setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
-
-        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
     }
 
     static void repatchInt32(void* where, int32_t value)
@@ -1711,8 +1864,6 @@ public:
         ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
         
         setInt32(where, value);
-
-        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
     }
 
     static void repatchPointer(void* where, void* value)
@@ -1720,8 +1871,6 @@ public:
         ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
         
         setPointer(where, value);
-
-        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
     }
 
     static void repatchLoadPtrToLEA(void* where)
@@ -1862,19 +2011,38 @@ private:
         return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
     }
 
-    static void linkJumpAbsolute(uint16_t* instruction, void* target)
+    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
     {
-        // FIMXE: this should be up in the MacroAssembler layer. :-(
-        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
-
         ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
         ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
-
-        ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
-            || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) );
-
+        
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T1 the branch displacement encoded in the instruction is 2 
+        // less than the actual displacement.
+        relative -= 2;
+        return ((relative << 23) >> 23) == relative;
+    }
+    
+    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T2 the branch displacement encoded in the instruction is 2 
+        // less than the actual displacement.
+        relative -= 2;
+        return ((relative << 20) >> 20) == relative;
+    }
+    
+    static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
         intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
-
         // From Cortex-A8 errata:
         // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
         // the target of the branch falls within the first region it is
@@ -1883,17 +2051,154 @@ private:
         // to enter a deadlock state.
         // The instruction is spanning two pages if it ends at an address ending 0x002
         bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
+        mayTriggerErrata = spansTwo4K;
         // The target is in the first page if the jump branch back by [3..0x1002] bytes
         bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
         bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
-
-        if (((relative << 7) >> 7) == relative && !wouldTriggerA8Errata) {
-            // ARM encoding for the top two bits below the sign bit is 'peculiar'.
-            if (relative >= 0)
-                relative ^= 0xC00000;
-
-            // All branch offsets should be an even distance.
-            ASSERT(!(relative & 1));
+        return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
+    }
+    
+    static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // From Cortex-A8 errata:
+        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
+        // the target of the branch falls within the first region it is
+        // possible for the processor to incorrectly determine the branch
+        // instruction, and it is also possible in some cases for the processor
+        // to enter a deadlock state.
+        // The instruction is spanning two pages if it ends at an address ending 0x002
+        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
+        mayTriggerErrata = spansTwo4K;
+        // The target is in the first page if the jump branch back by [3..0x1002] bytes
+        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
+        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
+        return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
+    }
+    
+    void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIMXE: this should be up in the MacroAssembler layer. :-(        
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT1(instruction, target));
+        
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T1 the branch displacement encoded in the instruction is 2 
+        // less than the actual displacement.
+        relative -= 2;
+        
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+    }
+    
+    static void linkJumpT2(uint16_t* instruction, void* target)
+    {
+        // FIMXE: this should be up in the MacroAssembler layer. :-(        
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT2(instruction, target));
+        
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T2 the branch displacement encoded in the instruction is 2 
+        // less than the actual displacement.
+        relative -= 2;
+        
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+    }
+    
+    void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIMXE: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        bool scratch;
+        UNUSED_PARAM(scratch);
+        ASSERT(canBeJumpT3(instruction, target, scratch));
+        
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+        instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+    }
+    
+    static void linkJumpT4(uint16_t* instruction, void* target)
+    {
+        // FIMXE: this should be up in the MacroAssembler layer. :-(        
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        bool scratch;
+        UNUSED_PARAM(scratch);
+        ASSERT(canBeJumpT4(instruction, target, scratch));
+        
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+        if (relative >= 0)
+            relative ^= 0xC00000;
+        
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+    }
+    
+    void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIMXE: this should be up in the MacroAssembler layer. :-(        
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        instruction[-3] = ifThenElse(cond) | OP_IT;
+        linkJumpT4(instruction, target);
+    }
+    
+    static void linkBX(uint16_t* instruction, void* target)
+    {
+        // FIMXE: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+    }
+    
+    void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIMXE: this should be up in the MacroAssembler layer. :-(        
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        linkBX(instruction, target);
+        instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+    }
+    
+    static void linkJumpAbsolute(uint16_t* instruction, void* target)
+    {
+        // FIMXE: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        
+        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+               || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+        
+        bool scratch;
+        if (canBeJumpT4(instruction, target, scratch)) {
             // There may be a better way to fix this, but right now put the NOPs first, since in the
             // case of an conditional branch this will be coming after an ITTT predicating *three*
             // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
@@ -1902,9 +2207,9 @@ private:
             instruction[-5] = OP_NOP_T1;
             instruction[-4] = OP_NOP_T2a;
             instruction[-3] = OP_NOP_T2b;
-            instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
-            instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+            linkJumpT4(instruction, target);
         } else {
+            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
             ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
             ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
             instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
@@ -1914,11 +2219,12 @@ private:
             instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
         }
     }
-
+    
     static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
     {
         return op | (imm.m_value.i << 10) | imm.m_value.imm4;
     }
+
     static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
     {
         return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
@@ -2035,6 +2341,7 @@ private:
     } m_formatter;
 
     Vector<LinkRecord> m_jumpsToLink;
+    Vector<int32_t> m_offsets;
 };
 
 } // namespace JSC
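
Each canBeJumpTn() predicate added above tests whether the PC-relative displacement (pre-adjusted by 2 for T1/T2, as the comments note) fits the signed immediate field of the corresponding Thumb branch encoding, using the shift pair ((relative << k) >> k) == relative; on the 32-bit target that admits 9 bits for T1, 12 for T2, 21 for T3 and 25 for T4. A standalone restatement of the same check (the helper name is illustrative; like the assembler it relies on arithmetic right shifts of signed values):

    #include <cstdint>

    // Does a signed displacement survive truncation to `bits` bits?
    // Mirrors canBeJumpT1 (bits = 9), T2 (12), T3 (21) and T4 (25) above.
    static bool fitsInSignedBits(intptr_t relative, int bits)
    {
        const int shift = 8 * sizeof(intptr_t) - bits;
        return ((relative << shift) >> shift) == relative;
    }

For instance, fitsInSignedBits(-256, 9) is true (the farthest backward T1 branch) while fitsInSignedBits(256, 9) is false, matching the asymmetric range of a 9-bit two's-complement field.
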
index aab908957ecd03611bc00414db564bed85d17db7..5db2cb9fcd8bd9acced94328766cdb2c71841dbf 100644 (file)
@@ -418,12 +418,6 @@ public:
 
 
     // Section 3: Misc admin methods
-
-    static CodePtr trampolineAt(CodeRef ref, Label label)
-    {
-        return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
-    }
-
     size_t size()
     {
         return m_assembler.size();
@@ -479,6 +473,9 @@ public:
     {
         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
     }
+    
+    void beginUninterruptedSequence() { }
+    void endUninterruptedSequence() { }
 
 protected:
     AssemblerType m_assembler;
index 47cac5ab63cc9ff952898df4c1dfa9b0ad974560..ae58946d97f13a705cecef26db337d6d14c07ba9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -49,25 +49,34 @@ namespace JSC {
 //
 class LinkBuffer : public Noncopyable {
     typedef MacroAssemblerCodeRef CodeRef;
+    typedef MacroAssemblerCodePtr CodePtr;
     typedef MacroAssembler::Label Label;
     typedef MacroAssembler::Jump Jump;
     typedef MacroAssembler::JumpList JumpList;
     typedef MacroAssembler::Call Call;
     typedef MacroAssembler::DataLabel32 DataLabel32;
     typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+    typedef MacroAssembler::JmpDst JmpDst;
+#if ENABLE(BRANCH_COMPACTION)
+    typedef MacroAssembler::LinkRecord LinkRecord;
+    typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
 
 public:
     // Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
     //       First, executablePool is copied into m_executablePool, then the initialization of
     //       m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
-    LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
+    // The linkOffset parameter should only be non-null when recompiling for exception info
+    LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool, void* linkOffset)
         : m_executablePool(executablePool)
-        , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
-        , m_size(masm->m_assembler.size())
+        , m_size(0)
+        , m_code(0)
+        , m_assembler(masm)
 #ifndef NDEBUG
         , m_completed(false)
 #endif
     {
+        linkCode(linkOffset);
     }
 
     ~LinkBuffer()
@@ -80,28 +89,32 @@ public:
     void link(Call call, FunctionPtr function)
     {
         ASSERT(call.isFlagSet(Call::Linkable));
+        call.m_jmp = applyOffset(call.m_jmp);
         MacroAssembler::linkCall(code(), call, function);
     }
     
     void link(Jump jump, CodeLocationLabel label)
     {
+        jump.m_jmp = applyOffset(jump.m_jmp);
         MacroAssembler::linkJump(code(), jump, label);
     }
 
     void link(JumpList list, CodeLocationLabel label)
     {
         for (unsigned i = 0; i < list.m_jumps.size(); ++i)
-            MacroAssembler::linkJump(code(), list.m_jumps[i], label);
+            link(list.m_jumps[i], label);
     }
 
     void patch(DataLabelPtr label, void* value)
     {
-        MacroAssembler::linkPointer(code(), label.m_label, value);
+        JmpDst target = applyOffset(label.m_label);
+        MacroAssembler::linkPointer(code(), target, value);
     }
 
     void patch(DataLabelPtr label, CodeLocationLabel value)
     {
-        MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress());
+        JmpDst target = applyOffset(label.m_label);
+        MacroAssembler::linkPointer(code(), target, value.executableAddress());
     }
 
     // These methods are used to obtain handles to allow the code to be relinked / repatched later.
@@ -110,35 +123,36 @@ public:
     {
         ASSERT(call.isFlagSet(Call::Linkable));
         ASSERT(!call.isFlagSet(Call::Near));
-        return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+        return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp)));
     }
 
     CodeLocationNearCall locationOfNearCall(Call call)
     {
         ASSERT(call.isFlagSet(Call::Linkable));
         ASSERT(call.isFlagSet(Call::Near));
-        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp)));
     }
 
     CodeLocationLabel locationOf(Label label)
     {
-        return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label));
+        return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
     }
 
     CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
     {
-        return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label));
+        return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
     }
 
     CodeLocationDataLabel32 locationOf(DataLabel32 label)
     {
-        return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label));
+        return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
     }
 
     // This method obtains the return address of the call, given as an offset from
     // the start of the code.
     unsigned returnAddressOffset(Call call)
     {
+        call.m_jmp = applyOffset(call.m_jmp);
         return MacroAssembler::getLinkerCallReturnOffset(call);
     }
 
@@ -152,6 +166,7 @@ public:
 
         return CodeRef(m_code, m_executablePool, m_size);
     }
+
     CodeLocationLabel finalizeCodeAddendum()
     {
         performFinalization();
@@ -159,7 +174,20 @@ public:
         return CodeLocationLabel(code());
     }
 
+    CodePtr trampolineAt(Label label)
+    {
+        return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
+    }
+
 private:
+    template <typename T> T applyOffset(T src)
+    {
+#if ENABLE(BRANCH_COMPACTION)
+        src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
+#endif
+        return src;
+    }
+    
     // Keep this private! - the underlying code should only be obtained externally via 
     // finalizeCode() or finalizeCodeAddendum().
     void* code()
@@ -167,6 +195,77 @@ private:
         return m_code;
     }
 
+    void linkCode(void* linkOffset)
+    {
+        UNUSED_PARAM(linkOffset);
+        ASSERT(!m_code);
+#if !ENABLE(BRANCH_COMPACTION)
+        m_code = m_assembler->m_assembler.executableCopy(m_executablePool.get());
+        m_size = m_assembler->size();
+#else
+        size_t initialSize = m_assembler->size();
+        m_code = (uint8_t*)m_executablePool->alloc(initialSize);
+        if (!m_code)
+            return;
+        ExecutableAllocator::makeWritable(m_code, m_assembler->size());
+        uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
+        uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+        const uint8_t* linkBase = linkOffset ? reinterpret_cast<uint8_t*>(linkOffset) : outData;
+        int readPtr = 0;
+        int writePtr = 0;
+        Vector<LinkRecord>& jumpsToLink = m_assembler->jumpsToLink();
+        unsigned jumpCount = jumpsToLink.size();
+        for (unsigned i = 0; i < jumpCount; ++i) {
+            int offset = readPtr - writePtr;
+            ASSERT(!(offset & 1));
+            
+            // Copy the instructions from the last jump to the current one.
+            size_t regionSize = jumpsToLink[i].from() - readPtr;
+            memcpy(outData + writePtr, inData + readPtr, regionSize);
+            m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
+            readPtr += regionSize;
+            writePtr += regionSize;
+            
+            // Calculate absolute address of the jump target, in the case of backwards
+            // branches we need to be precise, forward branches we are pessimistic
+            const uint8_t* target;
+            if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+                target = linkBase + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+            else
+                target = linkBase + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+            
+            JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], linkBase + writePtr, target);
+            // Compact branch if we can...
+            if (m_assembler->canCompact(jumpsToLink[i].type())) {
+                // Step back in the write stream
+                int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+                if (delta) {
+                    writePtr -= delta;
+                    m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+                }
+            }
+            jumpsToLink[i].setFrom(writePtr);
+        }
+        // Copy everything after the last jump
+        memcpy(outData + writePtr, inData + readPtr, m_assembler->size() - readPtr);
+        m_assembler->recordLinkOffsets(readPtr, m_assembler->size(), readPtr - writePtr);
+        
+        // Actually link everything (don't link if we've be given a linkoffset as it's a
+        // waste of time: linkOffset is used for recompiling to get exception info)
+        if (!linkOffset) {
+            for (unsigned i = 0; i < jumpCount; ++i) {
+                uint8_t* location = outData + jumpsToLink[i].from();
+                uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+                m_assembler->link(jumpsToLink[i], location, target);
+            }
+        }
+
+        jumpsToLink.clear();
+        m_size = writePtr + m_assembler->size() - readPtr;
+        m_executablePool->tryShrink(m_code, initialSize, m_size);
+#endif
+    }
+
     void performFinalization()
     {
 #ifndef NDEBUG
@@ -179,8 +278,9 @@ private:
     }
 
     RefPtr<ExecutablePool> m_executablePool;
-    void* m_code;
     size_t m_size;
+    void* m_code;
+    MacroAssembler* m_assembler;
 #ifndef NDEBUG
     bool m_completed;
 #endif
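
linkCode() above implements branch compaction as a two-pointer copy: instructions stream from the unlinked buffer at readPtr into the executable allocation at writePtr, each compactable jump lets writePtr fall behind by jumpSizeDelta(), and recordLinkOffsets() stores the running (readPtr - writePtr) so that executableOffsetFor()/applyOffset() can later translate pre-compaction offsets into the packed stream. A simplified, self-contained model of just that bookkeeping (the record fields, sizes and main() are illustrative; the real records also carry the jump type and condition):

    #include <cstdio>
    #include <vector>

    struct JumpRecord { int from; int savedBytes; };

    int main()
    {
        const int codeSize = 64;                                    // unlinked code size
        std::vector<JumpRecord> jumps = { { 16, 10 }, { 40, 2 } };  // sorted by 'from'
        std::vector<int> offsets(codeSize, 0);                      // executableOffsetFor() analogue

        int readPtr = 0, writePtr = 0;
        for (const JumpRecord& jump : jumps) {
            for (int i = readPtr; i < jump.from; ++i)
                offsets[i] = readPtr - writePtr;                    // offset accumulated so far
            writePtr += jump.from - readPtr;                        // copy the region verbatim
            readPtr = jump.from;
            writePtr -= jump.savedBytes;                            // the jump was emitted shorter
        }
        for (int i = readPtr; i < codeSize; ++i)                    // tail after the last jump
            offsets[i] = readPtr - writePtr;

        printf("compacted to %d of %d bytes\n", writePtr + codeSize - readPtr, codeSize);
        return 0;
    }
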
index 2a053d4896cf06df6126e56b3853a0f2320312d7..1bbb0cce9e903b187f542c579f1fec8cc016a082 100644 (file)
@@ -907,10 +907,18 @@ public:
         failureCases.append(branchTest32(Zero, dest));
     }
 
-    void zeroDouble(FPRegisterID srcDest)
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
     {
         m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
-        convertInt32ToDouble(ARMRegisters::S0, srcDest);
+        convertInt32ToDouble(ARMRegisters::S0, scratch);
+        return branchDouble(DoubleNotEqual, reg, scratch);
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+    {
+        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
+        convertInt32ToDouble(ARMRegisters::S0, scratch);
+        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
     }
 
 protected:
index b9cc8561228b7864c77c2459c559f30e18b04f1d..e3e928d6eb8b96050ec9a7ee44183628d544e0de 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
  * Copyright (C) 2010 University of Szeged
  *
  * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,26 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
     inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
 
 public:
+    typedef ARMv7Assembler::LinkRecord LinkRecord;
+    typedef ARMv7Assembler::JumpType JumpType;
+    typedef ARMv7Assembler::JumpLinkType JumpLinkType;
+
+    MacroAssemblerARMv7()
+        : m_inUninterruptedSequence(false)
+    {
+    }
+    
+    void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
+    void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
+    Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
+    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
+    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
+    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
+    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
+    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+
     struct ArmAddress {
         enum AddressType {
             HasOffset,
@@ -651,7 +671,7 @@ public:
             Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
             Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
             unordered.link(this);
-            // We get here if either unordered, or equal.
+            // We get here if either unordered or equal.
             Jump result = makeJump();
             notEqual.link(this);
             return result;
@@ -682,9 +702,27 @@ public:
         failureCases.append(branchTest32(Zero, dest));
     }
 
-    void zeroDouble(FPRegisterID dest)
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
     {
-        m_assembler.vmov_F64_0(dest);
+        m_assembler.vcmpz_F64(reg);
+        m_assembler.vmrs();
+        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+        unordered.link(this);
+        return result;
+    }
+
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.vcmpz_F64(reg);
+        m_assembler.vmrs();
+        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+        unordered.link(this);
+        // We get here if either unordered or equal.
+        Jump result = makeJump();
+        notEqual.link(this);
+        return result;
     }
 
     // Stack manipulation operations:
@@ -803,7 +841,7 @@ private:
             ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
             if (armImm.isValid())
                 m_assembler.cmp(left, armImm);
-            if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+            else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
                 m_assembler.cmn(left, armImm);
             else {
                 move(Imm32(imm), dataTempRegister);
@@ -969,14 +1007,14 @@ public:
 
     void jump(RegisterID target)
     {
-        m_assembler.bx(target);
+        m_assembler.bx(target, ARMv7Assembler::JumpFixed);
     }
 
     // Address is a memory location containing the address to jump to
     void jump(Address address)
     {
         load32(address, dataTempRegister);
-        m_assembler.bx(dataTempRegister);
+        m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed);
     }
 
 
@@ -1059,35 +1097,35 @@ public:
 
     void breakpoint()
     {
-        m_assembler.bkpt();
+        m_assembler.bkpt(0);
     }
 
     Call nearCall()
     {
         moveFixedWidthEncoding(Imm32(0), dataTempRegister);
-        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+        return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::LinkableNear);
     }
 
     Call call()
     {
         moveFixedWidthEncoding(Imm32(0), dataTempRegister);
-        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+        return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable);
     }
 
     Call call(RegisterID target)
     {
-        return Call(m_assembler.blx(target), Call::None);
+        return Call(m_assembler.blx(target, ARMv7Assembler::JumpFixed), Call::None);
     }
 
     Call call(Address address)
     {
         load32(address, dataTempRegister);
-        return Call(m_assembler.blx(dataTempRegister), Call::None);
+        return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::None);
     }
 
     void ret()
     {
-        m_assembler.bx(linkRegister);
+        m_assembler.bx(linkRegister, ARMv7Assembler::JumpFixed);
     }
 
     void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
@@ -1187,7 +1225,7 @@ public:
     {
         // Like a normal call, but don't link.
         moveFixedWidthEncoding(Imm32(0), dataTempRegister);
-        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+        return Call(m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable);
     }
 
     Call makeTailRecursiveCall(Jump oldJump)
@@ -1196,19 +1234,29 @@ public:
         return tailRecursiveCall();
     }
 
+    
+    int executableOffsetFor(int location)
+    {
+        return m_assembler.executableOffsetFor(location);
+    }
 
 protected:
+    bool inUninterruptedSequence()
+    {
+        return m_inUninterruptedSequence;
+    }
+
     ARMv7Assembler::JmpSrc makeJump()
     {
         moveFixedWidthEncoding(Imm32(0), dataTempRegister);
-        return m_assembler.bx(dataTempRegister);
+        return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
     }
 
     ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
     {
         m_assembler.it(cond, true, true);
         moveFixedWidthEncoding(Imm32(0), dataTempRegister);
-        return m_assembler.bx(dataTempRegister);
+        return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
     }
     ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
     ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
@@ -1298,6 +1346,8 @@ private:
     {
         ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
     }
+    
+    bool m_inUninterruptedSequence;
 };
 
 } // namespace JSC
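
The m_inUninterruptedSequence flag added here is what keeps patchable code out of the compactor: while it is set, makeJump()/makeBranch() tag their jumps JumpNoConditionFixedSize/JumpConditionFixedSize, canCompact() rejects them, and computeJumpType() always reserves the full MOVW/MOVT/BX (or IT-prefixed) sequence. A hypothetical call pattern from JIT code that needs a fixed-layout, later-repatchable region (the masm object and the code emitted inside the brackets are invented for illustration):

    // Hypothetical usage sketch, not from this commit:
    masm.beginUninterruptedSequence();
    // ... emit a patchable sequence whose exact layout a later repatch
    //     depends on (e.g. a pointer load rewritten once the real
    //     address is known) ...
    masm.endUninterruptedSequence();
    // Any branch emitted between the two calls keeps its padded,
    // fixed-size encoding after LinkBuffer runs branch compaction.
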
index cb86da7e4589d7b6d29cf2ecaf388c7cb61f1f3b..0731065ef73785a6f54625d2cef25cb08e2287c7 100644 (file)
@@ -527,12 +527,19 @@ public:
         failureCases.append(m_assembler.jne());
     }
 
-    void zeroDouble(FPRegisterID srcDest)
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
     {
         ASSERT(isSSE2Present());
-        m_assembler.xorpd_rr(srcDest, srcDest);
+        m_assembler.xorpd_rr(scratch, scratch);
+        return branchDouble(DoubleNotEqual, reg, scratch);
     }
 
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.xorpd_rr(scratch, scratch);
+        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+    }
 
     // Stack manipulation operations:
     //
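
This is the third back end in the commit to swap zeroDouble() for branchDoubleNonZero()/branchDoubleZeroOrNaN(): instead of handing callers a zeroed register to compare against, the macro assembler now emits the compare-against-zero and the branch itself, treating the second register purely as scratch. A hypothetical call-site before/after (the register names and surrounding jump are illustrative):

    // Previously a caller materialized 0.0 and compared explicitly:
    //     masm.zeroDouble(fpRegT1);
    //     Jump taken = masm.branchDouble(MacroAssembler::DoubleNotEqual, fpRegT0, fpRegT1);
    // With this commit the same test is a single helper call:
    MacroAssembler::Jump taken = masm.branchDoubleNonZero(fpRegT0, fpRegT1);
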
index 0bd81841fe81708c7be82efc629231797fbe0694..3667198dad5466e20548747449b5fa7925cc6c8c 100644 (file)
@@ -524,9 +524,29 @@ namespace JSC {
         bool m_regeneratingForExceptionInfo;
         CodeBlock* m_codeBlockBeingRegeneratedFrom;
 
-        static const unsigned s_maxEmitNodeDepth = 5000;
+        static const unsigned s_maxEmitNodeDepth = 3000;
+
+        friend class IncreaseEmitNodeDepth;
     };
 
+    class IncreaseEmitNodeDepth {
+    public:
+        IncreaseEmitNodeDepth(BytecodeGenerator& generator, unsigned count = 1)
+            : m_generator(generator)
+            , m_count(count)
+        {
+            m_generator.m_emitNodeDepth += count;
+        }
+
+        ~IncreaseEmitNodeDepth()
+        {
+            m_generator.m_emitNodeDepth -= m_count;
+        }
+
+    private:
+        BytecodeGenerator& m_generator;
+        unsigned m_count;
+    };
 }
 
 #endif // BytecodeGenerator_h
index 2cb781ff158a470ef627870fe15801cb8a3ca51a..a7455e49ca9d0665aef2c7ad3ea6a2f04257e986 100644 (file)
@@ -830,6 +830,8 @@ RegisterID* BinaryOpNode::emitStrcat(BytecodeGenerator& generator, RegisterID* d
     ASSERT(isAdd());
     ASSERT(resultDescriptor().definitelyIsString());
 
+    IncreaseEmitNodeDepth stackGuard(generator, 3);
+
     // Create a list of expressions for all the adds in the tree of nodes we can convert into
     // a string concatenation.  The rightmost node (c) is added first.  The rightmost node is
     // added first, and the leftmost child is never added, so the vector produced for the
@@ -1515,6 +1517,8 @@ RegisterID* ForNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
 
 RegisterID* ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
 {
+    IncreaseEmitNodeDepth stackGuard(generator);
+
     RefPtr<LabelScope> scope = generator.newLabelScope(LabelScope::Loop);
 
     if (!m_lexpr->isLocation())
@@ -1864,6 +1868,8 @@ RegisterID* TryNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
     // NOTE: The catch and finally blocks must be labeled explicitly, so the
     // optimizer knows they may be jumped to from anywhere.
 
+    IncreaseEmitNodeDepth stackGuard(generator);
+
     generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine());
 
     RefPtr<Label> tryStartLabel = generator.newLabel();
index cbcbd21a96e8e2d3e1226b3e7122aa1b1973d81c..39489e8e37087a9e2d2cf6f45057abf0a391c7e4 100644 (file)
@@ -85,7 +85,7 @@ void Debugger::recompileAllJSFunctions(JSGlobalData* globalData)
             continue;
 
         ExecState* exec = function->scope().globalObject()->JSGlobalObject::globalExec();
-        executable->recompile(exec);
+        executable->recompile();
         if (function->scope().globalObject()->debugger() == this)
             sourceProviders.add(executable->source().provider(), exec);
     }
index 445852b12a21d545e82f924f6a2e63a44f10d5d1..62d708d8e69d8c8b47099b1e83c9a36a14583372 100644 (file)
@@ -120,6 +120,13 @@ public:
         return poolAllocate(n);
     }
     
+    void tryShrink(void* allocation, size_t oldSize, size_t newSize)
+    {
+        if (static_cast<char*>(allocation) + oldSize != m_freePtr)
+            return;
+        m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
+    }
+
     ~ExecutablePool()
     {
         AllocationList::const_iterator end = m_pools.end();
@@ -129,6 +136,8 @@ public:
 
     size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
 
+    static bool underMemoryPressure();
+
 private:
     static Allocation systemAlloc(size_t n);
     static void systemRelease(const Allocation& alloc);
index 4c30058484d1a0ee93a2331c79c7bb811454efbe..3048e20cdc8d5da0c4f1658a6680542c7e5e9fca 100644 (file)
@@ -43,278 +43,363 @@ using namespace WTF;
 
 namespace JSC {
 
-#if CPU(X86_64)
-    // These limits suitable on 64-bit platforms (particularly x86-64, where we require all jumps to have a 2Gb max range).
-    #define VM_POOL_SIZE (2u * 1024u * 1024u * 1024u) // 2Gb
-    #define COALESCE_LIMIT (16u * 1024u * 1024u) // 16Mb
-#else
-    // These limits are hopefully sensible on embedded platforms.
-    #define VM_POOL_SIZE (32u * 1024u * 1024u) // 32Mb
-    #define COALESCE_LIMIT (4u * 1024u * 1024u) // 4Mb
-#endif
+#define TwoPow(n) (1ull << n)
 
-// ASLR currently only works on darwin (due to arc4random) & 64-bit (due to address space size).
-#define VM_POOL_ASLR (OS(DARWIN) && CPU(X86_64))
-
-// FreeListEntry describes a free chunk of memory, stored in the freeList.
-struct FreeListEntry {
-    FreeListEntry(void* pointer, size_t size)
-        : pointer(pointer)
-        , size(size)
-        , nextEntry(0)
-        , less(0)
-        , greater(0)
-        , balanceFactor(0)
+class AllocationTableSizeClass {
+public:
+    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
+        : m_blockSize(blockSize)
     {
+        ASSERT(blockSize == TwoPow(log2BlockSize));
+
+        // Calculate the number of blocks needed to hold size.
+        size_t blockMask = blockSize - 1;
+        m_blockCount = (size + blockMask) >> log2BlockSize;
+
+        // Align to the smallest power of two >= m_blockCount.
+        m_blockAlignment = 1;
+        while (m_blockAlignment < m_blockCount)
+            m_blockAlignment += m_blockAlignment;
     }
 
-    // All entries of the same size share a single entry
-    // in the AVLTree, and are linked together in a linked
-    // list, using nextEntry.
-    void* pointer;
-    size_t size;
-    FreeListEntry* nextEntry;
+    size_t blockSize() const { return m_blockSize; }
+    size_t blockCount() const { return m_blockCount; }
+    size_t blockAlignment() const { return m_blockAlignment; }
 
-    // These fields are used by AVLTree.
-    FreeListEntry* less;
-    FreeListEntry* greater;
-    int balanceFactor;
-};
+    size_t size()
+    {
+        return m_blockSize * m_blockCount;
+    }
 
-// Abstractor class for use in AVLTree.
-// Nodes in the AVLTree are of type FreeListEntry, keyed on
-// (and thus sorted by) their size.
-struct AVLTreeAbstractorForFreeList {
-    typedef FreeListEntry* handle;
-    typedef int32_t size;
-    typedef size_t key;
-
-    handle get_less(handle h) { return h->less; }
-    void set_less(handle h, handle lh) { h->less = lh; }
-    handle get_greater(handle h) { return h->greater; }
-    void set_greater(handle h, handle gh) { h->greater = gh; }
-    int get_balance_factor(handle h) { return h->balanceFactor; }
-    void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }
-
-    static handle null() { return 0; }
-
-    int compare_key_key(key va, key vb) { return va - vb; }
-    int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
-    int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
+private:
+    size_t m_blockSize;
+    size_t m_blockCount;
+    size_t m_blockAlignment;
 };
 
-// Used to reverse sort an array of FreeListEntry pointers.
-static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
-{
-    FreeListEntry* left = *(FreeListEntry**)leftPtr;
-    FreeListEntry* right = *(FreeListEntry**)rightPtr;
+template<unsigned log2Entries>
+class AllocationTableLeaf {
+    typedef uint64_t BitField;
 
-    return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
-}
+public:
+    static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
+    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
 
-// Used to reverse sort an array of pointers.
-static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
-{
-    void* left = *(void**)leftPtr;
-    void* right = *(void**)rightPtr;
+    static const size_t subregionSize = TwoPow(log2SubregionSize);
+    static const size_t regionSize = TwoPow(log2RegionSize);
+    static const unsigned entries = TwoPow(log2Entries);
+    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);
 
-    return (intptr_t)right - (intptr_t)left;
-}
+    AllocationTableLeaf()
+        : m_allocated(0)
+    {
+    }
 
-class FixedVMPoolAllocator
-{
-    // The free list is stored in a sorted tree.
-    typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;
+    ~AllocationTableLeaf()
+    {
+        ASSERT(isEmpty());
+    }
 
-    // Use madvise as apropriate to prevent freed pages from being spilled,
-    // and to attempt to ensure that used memory is reported correctly.
-#if HAVE(MADV_FREE_REUSE)
-    void release(void* position, size_t size)
+    size_t allocate(AllocationTableSizeClass& sizeClass)
     {
-        while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+        ASSERT(sizeClass.blockSize() == subregionSize);
+        ASSERT(!isFull());
+
+        size_t alignment = sizeClass.blockAlignment();
+        size_t count = sizeClass.blockCount();
+        // Use this mask to check for spans of free blocks.
+        BitField mask = ((1ull << count) - 1) << (alignment - count);
+
+        // Step in units of alignment size.
+        for (unsigned i = 0; i < entries; i += alignment) {
+            if (!(m_allocated & mask)) {
+                m_allocated |= mask;
+                return (i + (alignment - count)) << log2SubregionSize;
+            }
+            mask <<= alignment;
+        }
+        return notFound;
     }
 
-    void reuse(void* position, size_t size)
+    void free(size_t location, AllocationTableSizeClass& sizeClass)
     {
-        while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+        ASSERT(sizeClass.blockSize() == subregionSize);
+
+        size_t entry = location >> log2SubregionSize;
+        size_t count = sizeClass.blockCount();
+        BitField mask = ((1ull << count) - 1) << entry;
+
+        ASSERT((m_allocated & mask) == mask);
+        m_allocated &= ~mask;
     }
-#elif HAVE(MADV_FREE)
-    void release(void* position, size_t size)
+
+    bool isEmpty()
     {
-        while (madvise(position, size, MADV_FREE) == -1 && errno == EAGAIN) { }
+        return !m_allocated;
     }
-    
-    void reuse(void*, size_t) {}
-#elif HAVE(MADV_DONTNEED)
-    void release(void* position, size_t size)
+
+    bool isFull()
     {
-        while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+        return !~m_allocated;
     }
 
-    void reuse(void*, size_t) {}
-#else
-    void release(void*, size_t) {}
-    void reuse(void*, size_t) {}
+    static size_t size()
+    {
+        return regionSize;
+    }
+
+    static AllocationTableSizeClass classForSize(size_t size)
+    {
+        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
+    }
+
+#ifndef NDEBUG
+    void dump(size_t parentOffset = 0, unsigned indent = 0)
+    {
+        for (unsigned i = 0; i < indent; ++i)
+            fprintf(stderr, "    ");
+        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
+    }
 #endif
 
-    // All addition to the free list should go through this method, rather than
-    // calling insert directly, to avoid multiple entries beging added with the
-    // same key.  All nodes being added should be singletons, they should not
-    // already be a part of a chain.
-    void addToFreeList(FreeListEntry* entry)
-    {
-        ASSERT(!entry->nextEntry);
-
-        if (entry->size == m_commonSize) {
-            m_commonSizedAllocations.append(entry->pointer);
-            delete entry;
-        } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
-            // m_freeList already contain an entry for this size - insert this node into the chain.
-            entry->nextEntry = entryInFreeList->nextEntry;
-            entryInFreeList->nextEntry = entry;
-        } else
-            m_freeList.insert(entry);
-    }
-
-    // We do not attempt to coalesce addition, which may lead to fragmentation;
-    // instead we periodically perform a sweep to try to coalesce neigboring
-    // entries in m_freeList.  Presently this is triggered at the point 16MB
-    // of memory has been released.
-    void coalesceFreeSpace()
-    {
-        Vector<FreeListEntry*> freeListEntries;
-        SizeSortedFreeTree::Iterator iter;
-        iter.start_iter_least(m_freeList);
-
-        // Empty m_freeList into a Vector.
-        for (FreeListEntry* entry; (entry = *iter); ++iter) {
-            // Each entry in m_freeList might correspond to multiple
-            // free chunks of memory (of the same size).  Walk the chain
-            // (this is likely of couse only be one entry long!) adding
-            // each entry to the Vector (at reseting the next in chain
-            // pointer to separate each node out).
-            FreeListEntry* next;
-            do {
-                next = entry->nextEntry;
-                entry->nextEntry = 0;
-                freeListEntries.append(entry);
-            } while ((entry = next));
+private:
+    BitField m_allocated;
+};
+
+
+template<class NextLevel>
+class LazyAllocationTable {
+public:
+    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
+    static const unsigned entries = NextLevel::entries;
+
+    LazyAllocationTable()
+        : m_ptr(0)
+    {
+    }
+
+    ~LazyAllocationTable()
+    {
+        ASSERT(isEmpty());
+    }
+
+    size_t allocate(AllocationTableSizeClass& sizeClass)
+    {
+        if (!m_ptr)
+            m_ptr = new NextLevel();
+        return m_ptr->allocate(sizeClass);
+    }
+
+    void free(size_t location, AllocationTableSizeClass& sizeClass)
+    {
+        ASSERT(m_ptr);
+        m_ptr->free(location, sizeClass);
+        if (m_ptr->isEmpty()) {
+            delete m_ptr;
+            m_ptr = 0;
         }
-        // All entries are now in the Vector; purge the tree.
-        m_freeList.purge();
-
-        // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
-        // We reverse-sort so that we can logically work forwards through memory,
-        // whilst popping items off the end of the Vectors using last() and removeLast().
-        qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
-        qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);
-
-        // The entries from m_commonSizedAllocations that cannot be
-        // coalesced into larger chunks will be temporarily stored here.
-        Vector<void*> newCommonSizedAllocations;
-
-        // Keep processing so long as entries remain in either of the vectors.
-        while (freeListEntries.size() || m_commonSizedAllocations.size()) {
-            // We're going to try to find a FreeListEntry node that we can coalesce onto.
-            FreeListEntry* coalescionEntry = 0;
-
-            // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
-            if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
-                // Pop an item from the m_commonSizedAllocations vector - this is the lowest
-                // addressed free chunk.  Find out the begin and end addresses of the memory chunk.
-                void* begin = m_commonSizedAllocations.last();
-                void* end = (void*)((intptr_t)begin + m_commonSize);
-                m_commonSizedAllocations.removeLast();
-
-                // Try to find another free chunk abutting onto the end of the one we have already found.
-                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
-                    // There is an existing FreeListEntry for the next chunk of memory!
-                    // we can reuse this.  Pop it off the end of m_freeList.
-                    coalescionEntry = freeListEntries.last();
-                    freeListEntries.removeLast();
-                    // Update the existing node to include the common-sized chunk that we also found. 
-                    coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
-                    coalescionEntry->size += m_commonSize;
-                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
-                    // There is a second common-sized chunk that can be coalesced.
-                    // Allocate a new node.
-                    m_commonSizedAllocations.removeLast();
-                    coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
-                } else {
-                    // Nope - this poor little guy is all on his own. :-(
-                    // Add him into the newCommonSizedAllocations vector for now, we're
-                    // going to end up adding him back into the m_commonSizedAllocations
-                    // list when we're done.
-                    newCommonSizedAllocations.append(begin);
+    }
+
+    bool isEmpty()
+    {
+        return !m_ptr;
+    }
+
+    bool isFull()
+    {
+        return m_ptr && m_ptr->isFull();
+    }
+
+    static size_t size()
+    {
+        return NextLevel::size();
+    }
+
+#ifndef NDEBUG
+    void dump(size_t parentOffset = 0, unsigned indent = 0)
+    {
+        ASSERT(m_ptr);
+        m_ptr->dump(parentOffset, indent);
+    }
+#endif
+
+    static AllocationTableSizeClass classForSize(size_t size)
+    {
+        return NextLevel::classForSize(size);
+    }
+
+private:
+    NextLevel* m_ptr;
+};
+
+template<class NextLevel, unsigned log2Entries>
+class AllocationTableDirectory {
+    typedef uint64_t BitField;
+
+public:
+    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
+    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
+
+    static const size_t subregionSize = TwoPow(log2SubregionSize);
+    static const size_t regionSize = TwoPow(log2RegionSize);
+    static const unsigned entries = TwoPow(log2Entries);
+    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);
+
+    AllocationTableDirectory()
+        : m_full(0)
+        , m_hasSuballocation(0)
+    {
+    }
+
+    ~AllocationTableDirectory()
+    {
+        ASSERT(isEmpty());
+    }
+
+    size_t allocate(AllocationTableSizeClass& sizeClass)
+    {
+        ASSERT(sizeClass.blockSize() <= subregionSize);
+        ASSERT(!isFull());
+
+        if (sizeClass.blockSize() < subregionSize) {
+            BitField bit = 1;
+            for (unsigned i = 0; i < entries; ++i, bit += bit) {
+                if (m_full & bit)
                     continue;
+                size_t location = m_suballocations[i].allocate(sizeClass);
+                if (location != notFound) {
+                    // If this didn't already have a subregion, it does now!
+                    m_hasSuballocation |= bit;
+                    // Mirror the suballocation's full bit.
+                    if (m_suballocations[i].isFull())
+                        m_full |= bit;
+                    return (i * subregionSize) + location;
                 }
-            } else {
-                ASSERT(freeListEntries.size());
-                ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
-                // The lowest addressed item is from m_freeList; pop it from the Vector.
-                coalescionEntry = freeListEntries.last();
-                freeListEntries.removeLast();
             }
-            
-            // Right, we have a FreeListEntry, we just need check if there is anything else
-            // to coalesce onto the end.
-            ASSERT(coalescionEntry);
-            while (true) {
-                // Calculate the end address of the chunk we have found so far.
-                void* end = (void*)((intptr_t)coalescionEntry->pointer - coalescionEntry->size);
-
-                // Is there another chunk adjacent to the one we already have?
-                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
-                    // Yes - another FreeListEntry -pop it from the list.
-                    FreeListEntry* coalescee = freeListEntries.last();
-                    freeListEntries.removeLast();
-                    // Add it's size onto our existing node.
-                    coalescionEntry->size += coalescee->size;
-                    delete coalescee;
-                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
-                    // We can coalesce the next common-sized chunk.
-                    m_commonSizedAllocations.removeLast();
-                    coalescionEntry->size += m_commonSize;
-                } else
-                    break; // Nope, nothing to be added - stop here.
+            return notFound;
+        }
+
+        // A block is allocated if either it is fully allocated or contains suballocations.
+        BitField allocated = m_full | m_hasSuballocation;
+
+        size_t alignment = sizeClass.blockAlignment();
+        size_t count = sizeClass.blockCount();
+        // Use this mask to check for spans of free blocks.
+        BitField mask = ((1ull << count) - 1) << (alignment - count);
+
+        // Step in units of alignment size.
+        for (unsigned i = 0; i < entries; i += alignment) {
+            if (!(allocated & mask)) {
+                m_full |= mask;
+                return (i + (alignment - count)) << log2SubregionSize;
             }
+            mask <<= alignment;
+        }
+        return notFound;
+    }
 
-            // We've coalesced everything we can onto the current chunk.
-            // Add it back into m_freeList.
-            addToFreeList(coalescionEntry);
+    void free(size_t location, AllocationTableSizeClass& sizeClass)
+    {
+        ASSERT(sizeClass.blockSize() <= subregionSize);
+
+        size_t entry = location >> log2SubregionSize;
+
+        if (sizeClass.blockSize() < subregionSize) {
+            BitField bit = 1ull << entry;
+            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
+            // Check if the suballocation is now empty.
+            if (m_suballocations[entry].isEmpty())
+                m_hasSuballocation &= ~bit;
+            // No need to check, it clearly isn't full any more!
+            m_full &= ~bit;
+        } else {
+            size_t count = sizeClass.blockCount();
+            BitField mask = ((1ull << count) - 1) << entry;
+            ASSERT((m_full & mask) == mask);
+            ASSERT(!(m_hasSuballocation & mask));
+            m_full &= ~mask;
         }
+    }
 
-        // All chunks of free memory larger than m_commonSize should be
-        // back in m_freeList by now.  All that remains to be done is to
-        // copy the contents on the newCommonSizedAllocations back into
-        // the m_commonSizedAllocations Vector.
-        ASSERT(m_commonSizedAllocations.size() == 0);
-        m_commonSizedAllocations.append(newCommonSizedAllocations);
+    bool isEmpty()
+    {
+        return !(m_full | m_hasSuballocation);
     }
 
-public:
+    bool isFull()
+    {   
+        return !~m_full;
+    }
+
+    static size_t size()
+    {
+        return regionSize;
+    }
 
-    FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
-        : m_commonSize(commonSize)
-        , m_countFreedSinceLastCoalesce(0)
-        , m_totalHeapSize(totalHeapSize)
-    {
-        // Cook up an address to allocate at, using the following recipe:
-        //   17 bits of zero, stay in userspace kids.
-        //   26 bits of randomness for ASLR.
-        //   21 bits of zero, at least stay aligned within one level of the pagetables.
-        //
-        // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
-        // for now instead of 2^26 bits of ASLR lets stick with 25 bits of randomization plus
-        // 2^24, which should put up somewhere in the middle of usespace (in the address range
-        // 0x200000000000 .. 0x5fffffffffff).
-        intptr_t randomLocation = 0;
-#if VM_POOL_ASLR
-        randomLocation = arc4random() & ((1 << 25) - 1);
-        randomLocation += (1 << 24);
-        randomLocation <<= 21;
+    static AllocationTableSizeClass classForSize(size_t size)
+    {
+        if (size < subregionSize) {
+            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
+            if (sizeClass.size() < NextLevel::size())
+                return sizeClass;
+        }
+        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
+    }
+
+#ifndef NDEBUG
+    void dump(size_t parentOffset = 0, unsigned indent = 0)
+    {
+        for (unsigned i = 0; i < indent; ++i)
+            fprintf(stderr, "    ");
+        fprintf(stderr, "%08x: [", (int)parentOffset);
+        for (unsigned i = 0; i < entries; ++i) {
+            BitField bit = 1ull << i;
+            char c = m_hasSuballocation & bit
+                ? (m_full & bit ? 'N' : 'n')
+                : (m_full & bit ? 'F' : '-');
+            fprintf(stderr, "%c", c);
+        }
+        fprintf(stderr, "]\n");
+
+        for (unsigned i = 0; i < entries; ++i) {
+            BitField bit = 1ull << i;
+            size_t offset = parentOffset | (subregionSize * i);
+            if (m_hasSuballocation & bit)
+                m_suballocations[i].dump(offset, indent + 1);
+        }
+    }
+#endif
+
+private:
+    NextLevel m_suballocations[entries];
+    // Subregions exist in one of four states:
+    // (1) empty (both bits clear)
+    // (2) fully allocated as a single allocation (m_full set)
+    // (3) partially allocated through suballocations (m_hasSuballocation set)
+    // (4) fully allocated through suballocations (both bits set)
+    BitField m_full;
+    BitField m_hasSuballocation;
+};
+
+typedef AllocationTableLeaf<6> PageTables256KB;
+typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
+typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
+typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;
+
+#if CPU(ARM)
+typedef PageTables16MB FixedVMPoolPageTables;
+#elif CPU(X86_64) && !OS(LINUX)
+typedef PageTables1GB FixedVMPoolPageTables;
+#else
+typedef PageTables32MB FixedVMPoolPageTables;
 #endif
-        m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MMAP_FLAGS, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
+
+class FixedVMPoolAllocator
+{
+public:
+    FixedVMPoolAllocator()
+    {
+        m_base = mmap(0, FixedVMPoolPageTables::size(), INITIAL_PROTECTION_FLAGS, MMAP_FLAGS, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
 
         if (m_base == MAP_FAILED) {
 #if ENABLE(INTERPRETER)
@@ -328,127 +413,95 @@ public:
             // worrying about its previous state, and also makes coalescing m_freeList
             // simpler since we need not worry about the possibility of coalescing released
             // chunks with non-released ones.
-            release(m_base, m_totalHeapSize);
-            m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
+            release(m_base, FixedVMPoolPageTables::size());
         }
     }
-
     void* alloc(size_t size)
     {
-#if ENABLE(INTERPRETER)
-        if (!m_base)
-            return 0;
-#else
-        ASSERT(m_base);
-#endif
-        void* result;
-
-        // Freed allocations of the common size are not stored back into the main
-        // m_freeList, but are instead stored in a separate vector.  If the request
-        // is for a common sized allocation, check this list.
-        if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
-            result = m_commonSizedAllocations.last();
-            m_commonSizedAllocations.removeLast();
-        } else {
-            // Serach m_freeList for a suitable sized chunk to allocate memory from.
-            FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
-
-            // This would be bad news.
-            if (!entry) {
-                // Errk!  Lets take a last-ditch desparation attempt at defragmentation...
-                coalesceFreeSpace();
-                // Did that free up a large enough chunk?
-                entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
-                // No?...  *BOOM!*
-                if (!entry)
-                    CRASH();
-            }
-            ASSERT(entry->size != m_commonSize);
-
-            // Remove the entry from m_freeList.  But! -
-            // Each entry in the tree may represent a chain of multiple chunks of the
-            // same size, and we only want to remove one on them.  So, if this entry
-            // does have a chain, just remove the first-but-one item from the chain.
-            if (FreeListEntry* next = entry->nextEntry) {
-                // We're going to leave 'entry' in the tree; remove 'next' from its chain.
-                entry->nextEntry = next->nextEntry;
-                next->nextEntry = 0;
-                entry = next;
-            } else
-                m_freeList.remove(entry->size);
-
-            // Whoo!, we have a result!
-            ASSERT(entry->size >= size);
-            result = entry->pointer;
-
-            // If the allocation exactly fits the chunk we found in the,
-            // m_freeList then the FreeListEntry node is no longer needed.
-            if (entry->size == size)
-                delete entry;
-            else {
-                // There is memory left over, and it is not of the common size.
-                // We can reuse the existing FreeListEntry node to add this back
-                // into m_freeList.
-                entry->pointer = (void*)((intptr_t)entry->pointer + size);
-                entry->size -= size;
-                addToFreeList(entry);
-            }
-        }
+        ASSERT(size);
+        AllocationTableSizeClass sizeClass = classForSize(size);
+        ASSERT(sizeClass.size());
+        if (sizeClass.size() >= FixedVMPoolPageTables::size())
+            CRASH();
+
+        if (m_pages.isFull())
+            CRASH();
+        size_t offset = m_pages.allocate(sizeClass);
+        if (offset == notFound)
+            CRASH();
 
-        // Call reuse to report to the operating system that this memory is in use.
-        ASSERT(isWithinVMPool(result, size));
+        void* result = offsetToPointer(offset);
         reuse(result, size);
         return result;
     }
 
     void free(void* pointer, size_t size)
     {
-        ASSERT(m_base);
-        // Call release to report to the operating system that this
-        // memory is no longer in use, and need not be paged out.
-        ASSERT(isWithinVMPool(pointer, size));
         release(pointer, size);
 
-        // Common-sized allocations are stored in the m_commonSizedAllocations
-        // vector; all other freed chunks are added to m_freeList.
-        if (size == m_commonSize)
-            m_commonSizedAllocations.append(pointer);
-        else
-            addToFreeList(new FreeListEntry(pointer, size));
-
-        // Do some housekeeping.  Every time we reach a point that
-        // 16MB of allocations have been freed, sweep m_freeList
-        // coalescing any neighboring fragments.
-        m_countFreedSinceLastCoalesce += size;
-        if (m_countFreedSinceLastCoalesce >= COALESCE_LIMIT) {
-            m_countFreedSinceLastCoalesce = 0;
-            coalesceFreeSpace();
-        }
+        ASSERT(size);
+        AllocationTableSizeClass sizeClass = classForSize(size);
+        ASSERT(sizeClass.size());
+        ASSERT(sizeClass.size() < FixedVMPoolPageTables::size());
+
+        m_pages.free(pointerToOffset(pointer), sizeClass);
     }
 
-    bool isValid() const { return !!m_base; }
+    bool isValid() const
+    {
+        return !!m_base;
+    }
 
 private:
+    // Use madvise as appropriate to prevent freed pages from being spilled,
+    // and to attempt to ensure that used memory is reported correctly.
+#if HAVE(MADV_FREE_REUSE)
+    void release(void* position, size_t size)
+    {
+        while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+    }
 
-#ifndef NDEBUG
-    bool isWithinVMPool(void* pointer, size_t size)
+    void reuse(void* position, size_t size)
+    {
+        while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+    }
+#elif HAVE(MADV_FREE)
+    void release(void* position, size_t size)
+    {
+        while (madvise(position, size, MADV_FREE) == -1 && errno == EAGAIN) { }
+    }
+    
+    void reuse(void*, size_t) {}
+#elif HAVE(MADV_DONTNEED)
+    void release(void* position, size_t size)
     {
-        return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
+        while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
     }
+
+    void reuse(void*, size_t) {}
+#else
+    void release(void*, size_t) {}
+    void reuse(void*, size_t) {}
 #endif
 
-    // Freed space from the most common sized allocations will be held in this list, ...
-    const size_t m_commonSize;
-    Vector<void*> m_commonSizedAllocations;
+    AllocationTableSizeClass classForSize(size_t size)
+    {
+        return FixedVMPoolPageTables::classForSize(size);
+    }
 
-    // ... and all other freed allocations are held in m_freeList.
-    SizeSortedFreeTree m_freeList;
+    void* offsetToPointer(size_t offset)
+    {
+        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_base) + offset);
+    }
 
-    // This is used for housekeeping, to trigger defragmentation of the freed lists.
-    size_t m_countFreedSinceLastCoalesce;
+    size_t pointerToOffset(void* pointer)
+    {
+        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_base);
+    }
 
     void* m_base;
-    size_t m_totalHeapSize;
+    FixedVMPoolPageTables m_pages;
 };
 
 void ExecutableAllocator::intializePageSize()
@@ -457,13 +510,14 @@ void ExecutableAllocator::intializePageSize()
 }
 
 static FixedVMPoolAllocator* allocator = 0;
+static size_t allocatedCount = 0;
 static SpinLock spinlock = SPINLOCK_INITIALIZER;
 
 bool ExecutableAllocator::isValid() const
 {
     SpinLockHolder lock_holder(&spinlock);
     if (!allocator)
-        allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE);
+        allocator = new FixedVMPoolAllocator();
     return allocator->isValid();
 }
 
@@ -472,8 +526,9 @@ ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
     SpinLockHolder lock_holder(&spinlock);
 
     if (!allocator)
-        allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE);
+        allocator = new FixedVMPoolAllocator();
     ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
+    allocatedCount += size;
     return alloc;
 }
 
@@ -483,6 +538,15 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
 
     ASSERT(allocator);
     allocator->free(allocation.pages, allocation.size);
+    allocatedCount -= allocation.size;
+}
+
+bool ExecutablePool::underMemoryPressure()
+{
+    // Technically we should take the spin lock here, but we don't
+    // care if we get stale data.  This is only really a heuristic
+    // anyway.
+    return allocatedCount > (FixedVMPoolPageTables::size() / 2);
 }
 
 }
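
The replacement allocator above swaps the AVL-tree free list for a fixed hierarchy of 64-entry occupancy bitmaps (4KB leaf pages, directories of leaves, and so on), so allocation becomes a search for an aligned run of clear bits. A standalone sketch of that span search, following the same mask-and-shift walk as AllocationTableLeaf::allocate (illustrative names, not JSC code; it returns an entry index rather than a byte offset):

    #include <cstdint>
    #include <cstddef>

    static const size_t kNotFound = static_cast<size_t>(-1); // stands in for WTF's notFound

    // Find `count` contiguous free entries in a 64-bit occupancy bitmap, aligned to
    // `alignment` (the smallest power of two >= count), and mark them allocated.
    // Returns the index of the first entry claimed, or kNotFound. Assumes count < 64.
    size_t allocateSpan(uint64_t& allocated, size_t count, size_t alignment)
    {
        // `count` set bits, placed at the top of the first alignment window.
        uint64_t mask = ((1ull << count) - 1) << (alignment - count);
        for (unsigned i = 0; i < 64; i += alignment) {
            if (!(allocated & mask)) {
                allocated |= mask;              // claim the span
                return i + (alignment - count); // index of the first claimed entry
            }
            mask <<= alignment;
        }
        return kNotFound;
    }
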
index 9de8236d66a7671779990dfc77d9c61d88beb6a0..0a1b8df822b37835b93e198ccc497013092bfdae 100644 (file)
@@ -55,6 +55,11 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
     ASSERT_UNUSED(result, !result);
 }
 
+bool ExecutablePool::underMemoryPressure()
+{
+    return false;
+}
+
 bool ExecutableAllocator::isValid() const
 {
     return true;
index 2b13529ee03a2bdea589f296540e751a15cfda15..19179d49992e64c2c97425baeefcf632b7fce444 100644 (file)
@@ -54,6 +54,11 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
     VirtualFree(alloc.pages, 0, MEM_RELEASE); 
 }
 
+bool ExecutablePool::underMemoryPressure()
+{
+    return false;
+}
+
 bool ExecutableAllocator::isValid() const
 {
     return true;
index 00f0d23e715697f00ad5f30ef1bfa1d841e8b345..d3330d82a19b5894df852fc28c080cac0643df98 100644 (file)
@@ -71,7 +71,7 @@ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAd
     repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
 }
 
-JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
+JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock, void* linkerOffset)
     : m_interpreter(globalData->interpreter)
     , m_globalData(globalData)
     , m_codeBlock(codeBlock)
@@ -89,6 +89,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
     , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
     , m_jumpTargetsPosition(0)
 #endif
+    , m_linkerOffset(linkerOffset)
 {
 }
 
@@ -494,7 +495,10 @@ JITCode JIT::privateCompile()
 
     ASSERT(m_jmpTable.isEmpty());
 
-    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+    RefPtr<ExecutablePool> executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size());
+    if (!executablePool)
+        return JITCode();
+    LinkBuffer patchBuffer(this, executablePool.release(), m_linkerOffset);
 
     // Translate vPC offsets into addresses in JIT generated code, for switch tables.
     for (unsigned i = 0; i < m_switches.size(); ++i) {
index 83f2c0dfb2165ba4c4b382c42812a70a24de781b..ff72c80f74d3e1e3b4be3698d1d0227785c4ec7d 100644 (file)
--- a/jit/JIT.h
+++ b/jit/JIT.h
@@ -178,9 +178,9 @@ namespace JSC {
         static const int patchGetByIdDefaultOffset = 256;
 
     public:
-        static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock)
+        static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, void* offsetBase = 0)
         {
-            return JIT(globalData, codeBlock).privateCompile();
+            return JIT(globalData, codeBlock, offsetBase).privateCompile();
         }
 
         static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
@@ -221,7 +221,7 @@ namespace JSC {
         {
             if (!globalData->canUseJIT())
                 return;
-            JIT jit(globalData);
+            JIT jit(globalData, 0, 0);
             jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
         }
 
@@ -250,7 +250,7 @@ namespace JSC {
             }
         };
 
-        JIT(JSGlobalData*, CodeBlock* = 0);
+        JIT(JSGlobalData*, CodeBlock* = 0, void* = 0);
 
         void privateCompileMainPass();
         void privateCompileLinkPass();
@@ -666,16 +666,16 @@ namespace JSC {
 #endif
 #endif // USE(JSVALUE32_64)
 
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
-#define END_UNINTERRUPTED_SEQUENCE(name) endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
+#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); endUninterruptedSequence(); } while (false)
 
         void beginUninterruptedSequence(int, int);
         void endUninterruptedSequence(int, int);
 
 #else
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
-#define END_UNINTERRUPTED_SEQUENCE(name)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name)  do { beginUninterruptedSequence(); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name)  do { endUninterruptedSequence(); } while (false)
 #endif
 
         void emit_op_add(Instruction*);
@@ -947,6 +947,7 @@ namespace JSC {
         int m_uninterruptedConstantSequenceBegin;
 #endif
 #endif
+        void* m_linkerOffset;
         static PassRefPtr<NativeExecutable> stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
     } JIT_CLASS_ALIGNMENT;
 
index 217aac0349a1620148beb31168492c51f6993dc2..ccc0900326f1fa353ce4c5b2e5ddeca523d902da 100644 (file)
@@ -380,7 +380,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
 #endif
 
     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
-    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), 0);
 
 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
     patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
@@ -398,22 +398,22 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     CodeRef finalCode = patchBuffer.finalizeCode();
     *executablePool = finalCode.m_executablePool;
 
-    trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+    trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
 #if ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-    trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
+    trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(patchBuffer.trampolineAt(nativeCallThunk)))));
 #endif
 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-    trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+    trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
 #else
     UNUSED_PARAM(ctiStringLengthTrampoline);
 #endif
 #if ENABLE(JIT_OPTIMIZE_CALL)
-    trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+    trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
 #else
     UNUSED_PARAM(ctiVirtualCallLink);
 #endif
 #if ENABLE(JIT_OPTIMIZE_MOD)
-    trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
+    trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
 #endif
 }
 
@@ -796,9 +796,8 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
 
         addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
 
-        zeroDouble(fpRegT0);
-        emitLoadDouble(cond, fpRegT1);
-        addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
+        emitLoadDouble(cond, fpRegT0);
+        addJump(branchDoubleZeroOrNaN(fpRegT0, fpRegT1), target);
     } else
         addSlowCase(isNotInteger);
 
@@ -837,9 +836,8 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
 
         addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
 
-        zeroDouble(fpRegT0);
-        emitLoadDouble(cond, fpRegT1);
-        addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
+        emitLoadDouble(cond, fpRegT0);
+        addJump(branchDoubleNonZero(fpRegT0, fpRegT1), target);
     } else
         addSlowCase(isNotInteger);
 
index df969fa185fc674e2a54a2ba1430444ab70bc56e..1b2b4dd005927a0a63c8eca5dc4e222a26e8ac8b 100644 (file)
@@ -84,7 +84,7 @@ PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* glob
     jit.move(Imm32(0), regT0);
     jit.ret();
     
-    LinkBuffer patchBuffer(&jit, pool);
+    LinkBuffer patchBuffer(&jit, pool, 0);
     return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
 }
 
@@ -650,7 +650,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
     restoreArgumentReferenceForTrampoline();
     Call failureCall = tailRecursiveCall();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
 
     patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
 
@@ -741,7 +741,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
     emitFastArithIntToImmNoCheck(regT2, regT0);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
 
     // Use the patch information to link the failure cases back to the original slow case routine.
     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -804,7 +804,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
     } else
         compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
     Jump success = jump();
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
 
     // Use the patch information to link the failure cases back to the original slow case routine.
     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -861,7 +861,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
         compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
 
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -932,7 +932,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
 
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
 
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -1009,7 +1009,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
         compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -1087,7 +1087,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
         compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
 
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
index 791c88c562850c5c971d66ef480212c7d9196312..059c9c5c9382264503ef6ebd744e58e16787004b 100644 (file)
@@ -297,7 +297,7 @@ PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* glob
     jit.move(Imm32(0), regT0);
     jit.ret();
     
-    LinkBuffer patchBuffer(&jit, pool);
+    LinkBuffer patchBuffer(&jit, pool, 0);
     return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
 }
 
@@ -646,7 +646,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
     restoreArgumentReferenceForTrampoline();
     Call failureCall = tailRecursiveCall();
     
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     
     patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
     
@@ -741,7 +741,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
     move(Imm32(JSValue::Int32Tag), regT1);
     Jump success = jump();
     
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     
     // Use the patch information to link the failure cases back to the original slow case routine.
     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -805,7 +805,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
     
     Jump success = jump();
     
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     
     // Use the patch information to link the failure cases back to the original slow case routine.
     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -866,7 +866,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
 
     Jump success = jump();
     
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
             if (iter->to)
@@ -936,7 +936,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
     
     Jump success = jump();
     
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
             if (iter->to)
@@ -1013,7 +1013,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
 
     Jump success = jump();
     
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
             if (iter->to)
@@ -1090,7 +1090,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
         compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
     Jump success = jump();
     
-    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
             if (iter->to)
index a05be67206ef5a615ed09a23708edb65711c6363..6f2b120d3e45632ab3b450b71055afc4aecbc0c4 100644 (file)
@@ -129,7 +129,7 @@ namespace JSC {
         
         PassRefPtr<NativeExecutable> finalize()
         {
-            LinkBuffer patchBuffer(this, m_pool.get());
+            LinkBuffer patchBuffer(this, m_pool.get(), 0);
             patchBuffer.link(m_failures, CodeLocationLabel(m_globalData->jitStubs->ctiNativeCallThunk()->generatedJITCode().addressForCall()));
             return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
         }
index 05f2bb5628de669fcfa25492b079855d19ed2d58..2789e50a2aea5be9098f0957ac0a570dc93f1dc4 100644 (file)
@@ -1002,6 +1002,33 @@ void Heap::markProtectedObjects(MarkStack& markStack)
     }
 }
 
+void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
+{
+    m_tempSortingVectors.append(tempVector);
+}
+
+void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
+{
+    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
+    m_tempSortingVectors.removeLast();
+}
+    
+void Heap::markTempSortVectors(MarkStack& markStack)
+{
+    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;
+
+    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
+    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
+        Vector<ValueStringPair>* tempSortingVector = *it;
+
+        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
+        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt)
+            if (vectorIt->first)
+                markStack.append(vectorIt->first);
+        markStack.drain();
+    }
+}
+    
 void Heap::clearMarkBits()
 {
     for (size_t i = 0; i < m_heap.usedBlocks; ++i)
@@ -1089,6 +1116,9 @@ void Heap::markRoots()
 
     // Mark explicitly registered roots.
     markProtectedObjects(markStack);
+    
+    // Mark temporary vectors used for Array sorting.
+    markTempSortVectors(markStack);
 
     // Mark misc. other roots.
     if (m_markListSet && m_markListSet->size())
index 6599652035902be5637d23b2686cbee9bd3703c6..12893a3e60903d7b08fa1031cef8ad4cdac0b73a 100644 (file)
@@ -22,6 +22,7 @@
 #ifndef Collector_h
 #define Collector_h
 
+#include "JSValue.h"
 #include <stddef.h>
 #include <string.h>
 #include <wtf/HashCountedSet.h>
@@ -115,6 +116,9 @@ namespace JSC {
 
         void markConservatively(MarkStack&, void* start, void* end);
 
+        void pushTempSortVector(WTF::Vector<ValueStringPair>*);
+        void popTempSortVector(WTF::Vector<ValueStringPair>*);        
+
         HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = new HashSet<MarkedArgumentBuffer*>; return *m_markListSet; }
 
         JSGlobalData* globalData() const { return m_globalData; }
@@ -150,6 +154,7 @@ namespace JSC {
 
         void markRoots();
         void markProtectedObjects(MarkStack&);
+        void markTempSortVectors(MarkStack&);
         void markCurrentThreadConservatively(MarkStack&);
         void markCurrentThreadConservativelyInternal(MarkStack&);
         void markOtherThreadConservatively(MarkStack&, Thread*);
@@ -160,6 +165,7 @@ namespace JSC {
         CollectorHeap m_heap;
 
         ProtectCountSet m_protectedValues;
+        WTF::Vector<WTF::Vector<ValueStringPair>* > m_tempSortingVectors;
 
         HashSet<MarkedArgumentBuffer*>* m_markListSet;
 
index 1916a234b86de813dd1df7ec0aa6d569198370b3..765bd99173f44c05751cae7274709d7912428ab9 100644 (file)
@@ -211,7 +211,7 @@ ExceptionInfo* FunctionExecutable::reparseExceptionInfo(JSGlobalData* globalData
     if (globalData->canUseJIT())
 #endif
     {
-        JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
+        JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), generatedJITCode().start());
         ASSERT(newJITCode.size() == generatedJITCode().size());
     }
 #endif
@@ -241,7 +241,7 @@ ExceptionInfo* EvalExecutable::reparseExceptionInfo(JSGlobalData* globalData, Sc
     if (globalData->canUseJIT())
 #endif
     {
-        JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
+        JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), generatedJITCode().start());
         ASSERT(newJITCode.size() == generatedJITCode().size());
     }
 #endif
@@ -249,7 +249,7 @@ ExceptionInfo* EvalExecutable::reparseExceptionInfo(JSGlobalData* globalData, Sc
     return newCodeBlock->extractExceptionInfo();
 }
 
-void FunctionExecutable::recompile(ExecState*)
+void FunctionExecutable::recompile()
 {
     delete m_codeBlock;
     m_codeBlock = 0;
index 485cc0d6d65fbfd776eccbed676ac0c780db5c14..dec841a0ef4b28cf1ab7ae55544eb3421748997d 100644 (file)
@@ -287,7 +287,7 @@ namespace JSC {
         unsigned variableCount() const { return m_numVariables; }
         UString paramString() const;
 
-        void recompile(ExecState*);
+        void recompile();
         ExceptionInfo* reparseExceptionInfo(JSGlobalData*, ScopeChainNode*, CodeBlock*);
         void markAggregate(MarkStack& markStack);
         static PassRefPtr<FunctionExecutable> fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, int* errLine = 0, UString* errMsg = 0);
index ae9e038f74497b1f79d33c166e778fde81a20461..eb778ed2a12af2ef10f6d46ab4098df0cc42248e 100644 (file)
@@ -643,8 +643,6 @@ static int compareNumbersForQSort(const void* a, const void* b)
     return (da > db) - (da < db);
 }
 
-typedef std::pair<JSValue, UString> ValueStringPair;
-
 static int compareByStringPairForQSort(const void* a, const void* b)
 {
     const ValueStringPair* va = static_cast<const ValueStringPair*>(a);
@@ -704,6 +702,8 @@ void JSArray::sort(ExecState* exec)
         throwOutOfMemoryError(exec);
         return;
     }
+    
+    Heap::heap(this)->pushTempSortVector(&values);
 
     for (size_t i = 0; i < lengthNotIncludingUndefined; i++) {
         JSValue value = m_storage->m_vector[i];
@@ -711,17 +711,16 @@ void JSArray::sort(ExecState* exec)
         values[i].first = value;
     }
 
-    // FIXME: While calling these toString functions, the array could be mutated.
-    // In that case, objects pointed to by values in this vector might get garbage-collected!
-
     // FIXME: The following loop continues to call toString on subsequent values even after
     // a toString call raises an exception.
 
     for (size_t i = 0; i < lengthNotIncludingUndefined; i++)
         values[i].second = values[i].first.toString(exec);
 
-    if (exec->hadException())
+    if (exec->hadException()) {
+        Heap::heap(this)->popTempSortVector(&values);
         return;
+    }
 
     // FIXME: Since we sort by string value, a fast algorithm might be to use a radix sort. That would be O(N) rather
     // than O(N log N).
@@ -734,12 +733,18 @@ void JSArray::sort(ExecState* exec)
     qsort(values.begin(), values.size(), sizeof(ValueStringPair), compareByStringPairForQSort);
 #endif
 
-    // FIXME: If the toString function changed the length of the array, this might be
-    // modifying the vector incorrectly.
-
+    // If the toString function changed the length of the array or vector storage,
+    // increase the length to handle the original number of actual values.
+    if (m_vectorLength < lengthNotIncludingUndefined)
+        increaseVectorLength(lengthNotIncludingUndefined);
+    if (m_storage->m_length < lengthNotIncludingUndefined)
+        m_storage->m_length = lengthNotIncludingUndefined;
+        
     for (size_t i = 0; i < lengthNotIncludingUndefined; i++)
         m_storage->m_vector[i] = values[i].first;
 
+    Heap::heap(this)->popTempSortVector(&values);
+    
     checkConsistency(SortConsistencyCheck);
 }
 
index 73e62633d1c1380c0c53c265a2bc8ae663618afa..41b841e454a515f98559a93c2ded524a11ce2ec9 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "ArgList.h"
 #include "Collector.h"
+#include "CollectorHeapIterator.h"
 #include "CommonIdentifiers.h"
 #include "FunctionConstructor.h"
 #include "GetterSetter.h"
@@ -303,4 +304,21 @@ void JSGlobalData::dumpSampleData(ExecState* exec)
     interpreter->dumpSampleData(exec);
 }
 
+void JSGlobalData::recompileAllJSFunctions()
+{
+    // If JavaScript is running, it's not safe to recompile, since we'll end
+    // up throwing away code that is live on the stack.
+    ASSERT(!dynamicGlobalObject);
+
+    LiveObjectIterator it = heap.primaryHeapBegin();
+    LiveObjectIterator heapEnd = heap.primaryHeapEnd();
+    for ( ; it != heapEnd; ++it) {
+        if ((*it)->inherits(&JSFunction::info)) {
+            JSFunction* function = asFunction(*it);
+            if (!function->executable()->isHostFunction())
+                function->jsExecutable()->recompile();
+        }
+    }
+}
+
 } // namespace JSC
index c0f13f8d1e7df17859e4b34fcf012b0061e83415..6ccbf2c5a9872195cf3afc574653c432638ced10 100644 (file)
@@ -164,6 +164,7 @@ namespace JSC {
         
 #if ENABLE(ASSEMBLER)
         ExecutableAllocator executableAllocator;
+        ExecutableAllocator regexAllocator;
 #endif
 
 #if ENABLE(JIT)
@@ -229,6 +230,7 @@ namespace JSC {
         void startSampling();
         void stopSampling();
         void dumpSampleData(ExecState* exec);
+        void recompileAllJSFunctions();
         RegExpCache* regExpCache() { return m_regExpCache; }
     private:
         JSGlobalData(GlobalDataType, ThreadStackType);
index 1e2522681876ded163f4dcba388242597a9d0591..93ded4c7d9afb0d663b593630ecb0c1cc06d644b 100644 (file)
@@ -456,4 +456,21 @@ void JSGlobalObject::destroyJSGlobalObjectData(void* jsGlobalObjectData)
     delete static_cast<JSGlobalObjectData*>(jsGlobalObjectData);
 }
 
+DynamicGlobalObjectScope::DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject)
+    : m_dynamicGlobalObjectSlot(callFrame->globalData().dynamicGlobalObject)
+    , m_savedDynamicGlobalObject(m_dynamicGlobalObjectSlot)
+{
+    if (!m_dynamicGlobalObjectSlot) {
+#if ENABLE(JIT)
+        if (ExecutablePool::underMemoryPressure())
+            callFrame->globalData().recompileAllJSFunctions();
+#endif
+        m_dynamicGlobalObjectSlot = dynamicGlobalObject;
+
+        // Reset the date cache between JS invocations to force the VM
+        // to observe time zone changes.
+        callFrame->globalData().resetDateCache();
+    }
+}
+
 } // namespace JSC
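With the constructor moved out of line, the class remains a stack-only RAII guard. A minimal usage sketch (callFrame and globalObject are assumed to already be in hand; the destructor, declared in the header below, presumably restores the saved value):

    {
        DynamicGlobalObjectScope globalObjectScope(callFrame, globalObject);
        // run JavaScript; on the outermost entry the date cache has been reset and,
        // under executable-pool memory pressure, all JS functions have been recompiled
    }   // the previous dynamicGlobalObject is restored here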
index 6b9429abca97b02d2f5e620c79600a6e4b9bb08b..dff45182d2b508a2c7812ab46a7297391c35e7ed 100644 (file)
@@ -466,18 +466,7 @@ namespace JSC {
 
     class DynamicGlobalObjectScope : public Noncopyable {
     public:
-        DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject) 
-            : m_dynamicGlobalObjectSlot(callFrame->globalData().dynamicGlobalObject)
-            , m_savedDynamicGlobalObject(m_dynamicGlobalObjectSlot)
-        {
-            if (!m_dynamicGlobalObjectSlot) {
-                m_dynamicGlobalObjectSlot = dynamicGlobalObject;
-
-                // Reset the date cache between JS invocations to force the VM
-                // to observe time zone changes.
-                callFrame->globalData().resetDateCache();
-            }
-        }
+        DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject);
 
         ~DynamicGlobalObjectScope()
         {
index 1d5e639714f5f47811390be4d7bdeb8030936b8e..6128790f8e572ad784c123ba207a7ada0a783c21 100644 (file)
@@ -30,6 +30,8 @@
 #include "StringPrototype.h"
 
 namespace JSC {
+    
+static const unsigned resolveRopeForSubstringCutoff = 4;
 
 // Overview: this method converts a JSString from holding a string in rope form
 // down to a simple UString representation.  It does so by building up the string
@@ -104,6 +106,59 @@ void JSString::resolveRope(ExecState* exec) const
         }
     }
 }
+    
+// This function constructs a substring out of a rope without flattening it, by reusing the existing fibers.
+// This can reduce memory usage substantially. Since traversing ropes is slow, the function falls back
+// to flattening if the rope turns out to be long.
+JSString* JSString::substringFromRope(ExecState* exec, unsigned substringStart, unsigned substringLength)
+{
+    ASSERT(isRope());
+
+    JSGlobalData* globalData = &exec->globalData();
+
+    UString substringFibers[3];
+    
+    unsigned fiberCount = 0;
+    unsigned substringFiberCount = 0;
+    unsigned substringEnd = substringStart + substringLength;
+    unsigned fiberEnd = 0;
+
+    RopeIterator end;
+    for (RopeIterator it(m_other.m_fibers, m_fiberCount); it != end; ++it) {
+        ++fiberCount;
+        UStringImpl* fiberString = *it;
+        unsigned fiberStart = fiberEnd;
+        fiberEnd = fiberStart + fiberString->length();
+        if (fiberEnd <= substringStart)
+            continue;
+        unsigned copyStart = std::max(substringStart, fiberStart);
+        unsigned copyEnd = std::min(substringEnd, fiberEnd);
+        if (copyStart == fiberStart && copyEnd == fiberEnd)
+            substringFibers[substringFiberCount++] = UString(fiberString);
+        else
+            substringFibers[substringFiberCount++] = UString(UStringImpl::create(fiberString, copyStart - fiberStart, copyEnd - copyStart));
+        if (fiberEnd >= substringEnd)
+            break;
+        if (fiberCount > resolveRopeForSubstringCutoff || substringFiberCount >= 3) {
+            // This turned out to be a really inefficient rope. Just flatten it.
+            resolveRope(exec);
+            return jsSubstring(&exec->globalData(), m_value, substringStart, substringLength);
+        }
+    }
+    ASSERT(substringFiberCount && substringFiberCount <= 3);
+
+    if (substringLength == 1) {
+        ASSERT(substringFiberCount == 1);
+        UChar c = substringFibers[0].data()[0];
+        if (c <= 0xFF)
+            return globalData->smallStrings.singleCharacterString(globalData, c);
+    }
+    if (substringFiberCount == 1)
+        return new (globalData) JSString(globalData, substringFibers[0]);
+    if (substringFiberCount == 2)
+        return new (globalData) JSString(globalData, substringFibers[0], substringFibers[1]);
+    return new (globalData) JSString(globalData, substringFibers[0], substringFibers[1], substringFibers[2]);
+}
 
 JSValue JSString::replaceCharacter(ExecState* exec, UChar character, const UString& replacement)
 {
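A worked example of the overlap arithmetic in substringFromRope, with hypothetical fibers: for a rope made of "foo" (covering offsets 0-3) and "barbaz" (offsets 3-9), a substring starting at 2 with length 4 has substringEnd = 6. The first fiber yields copyStart = max(2, 0) = 2 and copyEnd = min(6, 3) = 3, i.e. "o"; the second yields copyStart = max(2, 3) = 3 and copyEnd = min(6, 9) = 6, i.e. "bar". The result is the two-fiber string "obar", produced without flattening the rope.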
index dec925d4b5667752da2e0a1fd3ccb2234d2cdb7b..08d525fd5aa86f96a421e462f03218b0608ce469 100644 (file)
@@ -356,6 +356,7 @@ namespace JSC {
         }
 
         void resolveRope(ExecState*) const;
+        JSString* substringFromRope(ExecState*, unsigned offset, unsigned length);
 
         void appendStringInConstruct(unsigned& index, const UString& string)
         {
@@ -435,6 +436,7 @@ namespace JSC {
         friend JSValue jsString(ExecState* exec, Register* strings, unsigned count);
         friend JSValue jsString(ExecState* exec, JSValue thisValue, const ArgList& args);
         friend JSString* jsStringWithFinalizer(ExecState*, const UString&, JSStringFinalizerCallback callback, void* context);
+        friend JSString* jsSubstring(ExecState* exec, JSString* s, unsigned offset, unsigned length);
     };
 
     JSString* asString(JSValue);
@@ -519,6 +521,19 @@ namespace JSC {
         JSGlobalData* globalData = &exec->globalData();
         return fixupVPtr(globalData, new (globalData) JSString(globalData, s, callback, context));
     }
+    
+    inline JSString* jsSubstring(ExecState* exec, JSString* s, unsigned offset, unsigned length)
+    {
+        ASSERT(offset <= static_cast<unsigned>(s->length()));
+        ASSERT(length <= static_cast<unsigned>(s->length()));
+        ASSERT(offset + length <= static_cast<unsigned>(s->length()));
+        JSGlobalData* globalData = &exec->globalData();
+        if (!length)
+            return globalData->smallStrings.emptyString(globalData);
+        if (s->isRope())
+            return s->substringFromRope(exec, offset, length);
+        return jsSubstring(globalData, s->m_value, offset, length);
+    }
 
     inline JSString* jsSubstring(JSGlobalData* globalData, const UString& s, unsigned offset, unsigned length)
     {
index 52f89948a3b957c69a2f27a1e82a5bc6956e87b3..283b3da942f5dd6f27f21928b760c9fe72c2aedb 100644 (file)
@@ -848,7 +848,8 @@ namespace JSC {
         return asValue() == jsNull();
     }
 #endif // USE(JSVALUE32_64)
-
+    
+    typedef std::pair<JSValue, UString> ValueStringPair;
 } // namespace JSC
 
 #endif // JSValue_h
index 192df4d9b37d5cf2507a70bbec01c5b6dfd967fe..5e9d610c74bea4c9b93c79127e41a5c8602b0b5c 100644 (file)
@@ -33,7 +33,7 @@ namespace JSC {
 
 PassRefPtr<RegExp> RegExpCache::lookupOrCreate(const UString& patternString, const UString& flags)
 {
-    if (patternString.size() < maxCacheablePatternLength) {
+    if (isCacheable(patternString)) {
         pair<HashMap<RegExpKey, RefPtr<RegExp> >::iterator, bool> result = m_cacheMap.add(RegExpKey(flags, patternString), 0);
         if (!result.second)
             return result.first->second;
index 03b73acfddddb78a5c779b13ab7eb2100f26db9d..998d80bc0c9a3ff0cc38779a501f8cb18a5e9781 100644 (file)
@@ -39,10 +39,12 @@ public:
     PassRefPtr<RegExp> lookupOrCreate(const UString& patternString, const UString& flags);
     PassRefPtr<RegExp> create(const UString& patternString, const UString& flags);
     RegExpCache(JSGlobalData* globalData);
+    
+    static bool isCacheable(const UString& patternString) { return patternString.size() < maxCacheablePatternLength; }
 
 private:
     static const unsigned maxCacheablePatternLength = 256;
-    static const int maxCacheableEntries = 256;
+    static const int maxCacheableEntries = 32;
 
     typedef HashMap<RegExpKey, RefPtr<RegExp> > RegExpCacheMap;
     RegExpKey patternKeyArray[maxCacheableEntries];
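Exposing the length test as a public static lets other components apply the same policy instead of duplicating the constant; the RegexJIT.cpp hunk at the end of this commit uses it to gate JIT compilation. Taken from that hunk, shown here only for context:

    if (!pattern.m_shouldFallBack && globalData->canUseJIT() && RegExpCache::isCacheable(patternString)) {
        // JIT-compile the pattern; longer patterns take the fallback path
    }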
index cd1cc89219a057f196d20619208b5626799e73e8..1e16513c05a36528ead3fba3a2f676b11dc205fb 100644 (file)
@@ -724,9 +724,17 @@ JSValue JSC_HOST_CALL stringProtoFuncSplit(ExecState* exec, JSObject*, JSValue t
 }
 
 JSValue JSC_HOST_CALL stringProtoFuncSubstr(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
-{
-    UString s = thisValue.toThisString(exec);
-    int len = s.size();
+{    
+    int len;
+    JSString* jsString = 0;
+    UString uString;
+    if (thisValue.isString()) {
+        jsString = static_cast<JSString*>(thisValue.asCell());
+        len = jsString->length();
+    } else {
+        uString = thisValue.toThisObject(exec)->toString(exec);
+        len = uString.size();
+    }
 
     JSValue a0 = args.at(0);
     JSValue a1 = args.at(1);
@@ -742,13 +750,26 @@ JSValue JSC_HOST_CALL stringProtoFuncSubstr(ExecState* exec, JSObject*, JSValue
     }
     if (start + length > len)
         length = len - start;
-    return jsSubstring(exec, s, static_cast<unsigned>(start), static_cast<unsigned>(length));
+    
+    unsigned substringStart = static_cast<unsigned>(start);
+    unsigned substringLength = static_cast<unsigned>(length);
+    if (jsString)
+        return jsSubstring(exec, jsString, substringStart, substringLength);
+    return jsSubstring(exec, uString, substringStart, substringLength);
 }
 
 JSValue JSC_HOST_CALL stringProtoFuncSubstring(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
 {
-    UString s = thisValue.toThisString(exec);
-    int len = s.size();
+    int len;
+    JSString* jsString = 0;
+    UString uString;
+    if (thisValue.isString()) {
+        jsString = static_cast<JSString*>(thisValue.asCell());
+        len = jsString->length();
+    } else {
+        uString = thisValue.toThisObject(exec)->toString(exec);
+        len = uString.size();
+    }
 
     JSValue a0 = args.at(0);
     JSValue a1 = args.at(1);
@@ -774,7 +795,11 @@ JSValue JSC_HOST_CALL stringProtoFuncSubstring(ExecState* exec, JSObject*, JSVal
         end = start;
         start = temp;
     }
-    return jsSubstring(exec, s, static_cast<unsigned>(start), static_cast<unsigned>(end) - static_cast<unsigned>(start));
+    unsigned substringStart = static_cast<unsigned>(start);
+    unsigned substringLength = static_cast<unsigned>(end) - substringStart;
+    if (jsString)
+        return jsSubstring(exec, jsString, substringStart, substringLength);
+    return jsSubstring(exec, uString, substringStart, substringLength);
 }
 
 JSValue JSC_HOST_CALL stringProtoFuncToLowerCase(ExecState* exec, JSObject*, JSValue thisValue, const ArgList&)
index 8f17a3e010ceb7797e73ba7484dc6b1a6a38fc58..8fb0dfdd03d3011bf885da7dbb78ba416e9bbc8e 100644 (file)
@@ -596,6 +596,8 @@ CString UString::UTF8String(bool strict) const
 {
     // Allocate a buffer big enough to hold all the characters.
     const unsigned length = size();
+    if (length > numeric_limits<unsigned>::max() / 3)
+        return CString(); 
     Vector<char, 1024> buffer(length * 3);
 
     // Convert to runs of 8-bit characters.
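The new check guards the length * 3 below: each UTF-16 code unit expands to at most three UTF-8 bytes here, so the multiplication can wrap in 32-bit unsigned arithmetic. For example, a length of 0x60000000 gives 0x60000000 * 3 = 0x120000000, which wraps to 0x20000000, leaving the buffer far smaller than the conversion needs; lengths above UINT_MAX / 3 are rejected up front instead.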
index e3b25bda9e361900ec156a5c7bb2509dc3a3d7b9..33e3d56cc35d760789dfdbc64a150e9a2a2e2152 100644 (file)
@@ -4468,9 +4468,8 @@ void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
 extern "C" {
 malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
     &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
-
     , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
-
+    , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
     };
 }
 
index 621f0f2fe1d829d3e79f8e6388e76bd24c5b0831..961e80fe8b4321be293e91f61a8601eafd04f832 100644 (file)
 #endif
 
 #define ENABLE_CONTEXT_MENUS 0
+#define ENABLE_DISK_IMAGE_CACHE 1
 #define ENABLE_DRAG_SUPPORT 0
 #define ENABLE_FTPDIR 1
 #define ENABLE_GEOLOCATION 1
 
 #define WTF_USE_PTHREAD_GETSPECIFIC_DIRECT 1
 
-#define ENABLE_JIT 0
-#define ENABLE_YARR 0
-#define ENABLE_YARR_JIT 0
-#ifdef __llvm__
-#define WTF_USE_JSVALUE32_64 1
+#if defined(WTF_ARM_ARCH_VERSION) && WTF_ARM_ARCH_VERSION >= 7
+    // ARMv7 and above: enable the JIT and YARR JIT; use JSVALUE32_64.
+    #define WTF_USE_JSVALUE32_64 1
+    #define ENABLE_INTERPRETER 1
+    #define ENABLE_JIT 1
+    #define ENABLE_YARR 1
+    #define ENABLE_YARR_JIT 1
 #else
-#define WTF_USE_JSVALUE32 1
+    // ARMv6; never use the JIT, use JSVALUE32_64 only if compiling with llvm.
+    #define ENABLE_JIT 0
+    #define ENABLE_YARR 0
+    #define ENABLE_YARR_JIT 0
+    /* FIXME: <rdar://problem/7478149> gcc-4.2 compiler bug with USE(JSVALUE32_64) and armv6 target */
+    #ifdef __llvm__
+    #define WTF_USE_JSVALUE32_64 1
+    #else
+    #define WTF_USE_JSVALUE32 1
+    #endif
 #endif
 
 #undef ENABLE_3D_CANVAS
 #define ENABLE_CONTEXT_MENUS 1
 #endif
 
+#if !defined(ENABLE_DISK_IMAGE_CACHE)
+#define ENABLE_DISK_IMAGE_CACHE 0
+#endif
+
 #if !defined(ENABLE_DRAG_SUPPORT)
 #define ENABLE_DRAG_SUPPORT 1
 #endif
@@ -1144,4 +1160,8 @@ on MinGW. See https://bugs.webkit.org/show_bug.cgi?id=29268 */
 
 #define ENABLE_JSC_ZOMBIES 0
 
+#if CPU(ARM_THUMB2)
+#define ENABLE_BRANCH_COMPACTION 1
+#endif
+
 #endif /* WTF_Platform_h */
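The two new flags follow Platform.h's usual pattern: a port may define ENABLE_DISK_IMAGE_CACHE or ENABLE_BRANCH_COMPACTION before this point, the #if !defined(...) blocks supply defaults, and consumers test the result through the ENABLE() macro rather than a raw #ifdef. A hedged sketch of a consumer (the guarded bodies are illustrative, not taken from this commit):

    #if ENABLE(BRANCH_COMPACTION)
        // emit short branches and let the link buffer compact the generated code
    #else
        // emit fixed-size branch sequences
    #endif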
index 7d09f1221e25e9aef55d6496fcef203062b7697b..3ce3053eaed67c58f1c4a33d332e4d6c0ff62358 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2003, 2006, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003, 2006, 2008, 2009, 2010 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -27,7 +27,7 @@
 #include "config.h"
 #include "CString.h"
 
-using std::min;
+using namespace std;
 
 namespace WTF {
 
@@ -45,7 +45,10 @@ void CString::init(const char* str, unsigned length)
 {
     if (!str)
         return;
-    
+
+    if (length >= numeric_limits<size_t>::max())
+        CRASH();
+
     m_buffer = CStringBuffer::create(length + 1);
     memcpy(m_buffer->mutableData(), str, length); 
     m_buffer->mutableData()[length] = '\0';
@@ -61,6 +64,9 @@ char* CString::mutableData()
     
 CString CString::newUninitialized(size_t length, char*& characterBuffer)
 {
+    if (length >= numeric_limits<size_t>::max())
+        CRASH();
+
     CString result;
     result.m_buffer = CStringBuffer::create(length + 1);
     char* bytes = result.m_buffer->mutableData();
@@ -73,11 +79,11 @@ void CString::copyBufferIfNeeded()
 {
     if (!m_buffer || m_buffer->hasOneRef())
         return;
-        
-    int len = m_buffer->length();
-    RefPtr<CStringBuffer> m_temp = m_buffer;
-    m_buffer = CStringBuffer::create(len);
-    memcpy(m_buffer->mutableData(), m_temp->data(), len);
+
+    RefPtr<CStringBuffer> buffer = m_buffer.release();
+    size_t length = buffer->length();
+    m_buffer = CStringBuffer::create(length);
+    memcpy(m_buffer->mutableData(), buffer->data(), length);
 }
 
 bool operator==(const CString& a, const CString& b)
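Besides rewriting copyBufferIfNeeded (the old version used an int length and a local confusingly named m_temp), the guards added to init() and newUninitialized() protect the length + 1 used for the terminating NUL: if length were SIZE_MAX, length + 1 would wrap to zero, CStringBuffer::create() would allocate an empty buffer, and the following memcpy of length bytes would write far out of bounds. The new CRASH() calls reject that case before any allocation happens.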
index 41a6610b4bf021312c546ee51715e404cc7b4c36..9d5333f98ca03a2ea1e6c27cd6c3ffebad282286 100644 (file)
@@ -77,7 +77,7 @@ PassRefPtr<StringImpl> StringImpl::createUninitialized(unsigned length, UChar*&
     // Allocate a single buffer large enough to contain the StringImpl
     // struct as well as the data which it contains. This removes one 
     // heap allocation from this call.
-    if (length > ((std::numeric_limits<size_t>::max() - sizeof(StringImpl)) / sizeof(UChar)))
+    if (length > ((std::numeric_limits<unsigned>::max() - sizeof(StringImpl)) / sizeof(UChar)))
         CRASH();
     size_t size = sizeof(StringImpl) + length * sizeof(UChar);
     StringImpl* string = static_cast<StringImpl*>(fastMalloc(size));
@@ -92,9 +92,9 @@ PassRefPtr<StringImpl> StringImpl::create(const UChar* characters, unsigned leng
         return empty();
 
     UChar* data;
-    PassRefPtr<StringImpl> string = createUninitialized(length, data);
+    RefPtr<StringImpl> string = createUninitialized(length, data);
     memcpy(data, characters, length * sizeof(UChar));
-    return string;
+    return string.release();
 }
 
 PassRefPtr<StringImpl> StringImpl::create(const char* characters, unsigned length)
@@ -103,19 +103,22 @@ PassRefPtr<StringImpl> StringImpl::create(const char* characters, unsigned lengt
         return empty();
 
     UChar* data;
-    PassRefPtr<StringImpl> string = createUninitialized(length, data);
+    RefPtr<StringImpl> string = createUninitialized(length, data);
     for (unsigned i = 0; i != length; ++i) {
         unsigned char c = characters[i];
         data[i] = c;
     }
-    return string;
+    return string.release();
 }
 
 PassRefPtr<StringImpl> StringImpl::create(const char* string)
 {
     if (!string)
         return empty();
-    return create(string, strlen(string));
+    size_t length = strlen(string);
+    if (length > numeric_limits<unsigned>::max())
+        CRASH();
+    return create(string, length);
 }
 
 PassRefPtr<StringImpl> StringImpl::create(const UChar* characters, unsigned length, PassRefPtr<SharedUChar> sharedBuffer)
@@ -201,7 +204,10 @@ PassRefPtr<StringImpl> StringImpl::lower()
     if (noUpper && !(ored & ~0x7F))
         return this;
 
+    if (m_length > static_cast<unsigned>(numeric_limits<int32_t>::max()))
+        CRASH();
     int32_t length = m_length;
+
     UChar* data;
     RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
 
@@ -232,7 +238,10 @@ PassRefPtr<StringImpl> StringImpl::upper()
     // but in empirical testing, few actual calls to upper() are no-ops, so
     // it wouldn't be worth the extra time for pre-scanning.
     UChar* data;
-    PassRefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+    RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+
+    if (m_length > static_cast<unsigned>(numeric_limits<int32_t>::max()))
+        CRASH();
     int32_t length = m_length;
 
     // Do a faster loop for the case where all the characters are ASCII.
@@ -243,7 +252,7 @@ PassRefPtr<StringImpl> StringImpl::upper()
         data[i] = toASCIIUpper(c);
     }
     if (!(ored & ~0x7F))
-        return newImpl;
+        return newImpl.release();
 
     // Do a slower implementation for cases that include non-ASCII characters.
     bool error;
@@ -254,47 +263,51 @@ PassRefPtr<StringImpl> StringImpl::upper()
     Unicode::toUpper(data, realLength, m_data, m_length, &error);
     if (error)
         return this;
-    return newImpl;
+    return newImpl.release();
 }
 
-PassRefPtr<StringImpl> StringImpl::secure(UChar aChar, bool last)
+PassRefPtr<StringImpl> StringImpl::secure(UChar character, bool hideLastCharacter)
 {
-    int length = m_length;
-    Vector<UChar> data(length);
-    if (length > 0) {
-        for (int i = 0; i <  length - 1; ++i)
-            data[i] = aChar;
-        data[length - 1] = (last ? aChar : m_data[length - 1]);
+    UChar* data;
+    RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+    if (m_length) {
+        const unsigned lastCharacterIndex = m_length - 1;
+        for (unsigned i = 0; i < lastCharacterIndex; ++i)
+            data[i] = character;
+        data[lastCharacterIndex] = hideLastCharacter ? character : m_data[lastCharacterIndex];
     }
-    return adopt(data);
+    return newImpl.release();
 }
 
 PassRefPtr<StringImpl> StringImpl::foldCase()
 {
     UChar* data;
-    PassRefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+    RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+
+    if (m_length > static_cast<unsigned>(numeric_limits<int32_t>::max()))
+        CRASH();
     int32_t length = m_length;
 
     // Do a faster loop for the case where all the characters are ASCII.
     UChar ored = 0;
-    for (int i = 0; i < length; i++) {
+    for (int32_t i = 0; i < length; i++) {
         UChar c = m_data[i];
         ored |= c;
         data[i] = toASCIILower(c);
     }
     if (!(ored & ~0x7F))
-        return newImpl;
+        return newImpl.release();
 
     // Do a slower implementation for cases that include non-ASCII characters.
     bool error;
     int32_t realLength = Unicode::foldCase(data, length, m_data, m_length, &error);
     if (!error && realLength == length)
-        return newImpl;
+        return newImpl.release();
     newImpl = createUninitialized(realLength, data);
     Unicode::foldCase(data, realLength, m_data, m_length, &error);
     if (error)
         return this;
-    return newImpl;
+    return newImpl.release();
 }
 
 PassRefPtr<StringImpl> StringImpl::stripWhiteSpace()
@@ -482,7 +495,10 @@ int StringImpl::find(const char* chs, int index, bool caseSensitive)
     if (!chs || index < 0)
         return -1;
 
-    int chsLength = strlen(chs);
+    size_t matchStringLength = strlen(chs);
+    if (matchStringLength > static_cast<unsigned>(numeric_limits<int>::max()))
+        CRASH();
+    int chsLength = matchStringLength;
     int n = m_length - index;
     if (n < 0)
         return -1;
@@ -661,7 +677,7 @@ PassRefPtr<StringImpl> StringImpl::replace(UChar oldC, UChar newC)
         return this;
 
     UChar* data;
-    PassRefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+    RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
 
     for (i = 0; i != m_length; ++i) {
         UChar ch = m_data[i];
@@ -669,7 +685,7 @@ PassRefPtr<StringImpl> StringImpl::replace(UChar oldC, UChar newC)
             ch = newC;
         data[i] = ch;
     }
-    return newImpl;
+    return newImpl.release();
 }
 
 PassRefPtr<StringImpl> StringImpl::replace(unsigned position, unsigned lengthToReplace, StringImpl* str)
@@ -684,14 +700,14 @@ PassRefPtr<StringImpl> StringImpl::replace(unsigned position, unsigned lengthToR
     if ((length() - lengthToReplace) >= (numeric_limits<unsigned>::max() - lengthToInsert))
         CRASH();
 
-    PassRefPtr<StringImpl> newImpl =
+    RefPtr<StringImpl> newImpl =
         createUninitialized(length() - lengthToReplace + lengthToInsert, data);
     memcpy(data, characters(), position * sizeof(UChar));
     if (str)
         memcpy(data + position, str->characters(), lengthToInsert * sizeof(UChar));
     memcpy(data + position + lengthToInsert, characters() + position + lengthToReplace,
         (length() - position - lengthToReplace) * sizeof(UChar));
-    return newImpl;
+    return newImpl.release();
 }
 
 PassRefPtr<StringImpl> StringImpl::replace(UChar pattern, StringImpl* replacement)
@@ -724,7 +740,7 @@ PassRefPtr<StringImpl> StringImpl::replace(UChar pattern, StringImpl* replacemen
     newSize += replaceSize;
 
     UChar* data;
-    PassRefPtr<StringImpl> newImpl = createUninitialized(newSize, data);
+    RefPtr<StringImpl> newImpl = createUninitialized(newSize, data);
 
     // Construct the new data
     int srcSegmentEnd;
@@ -746,7 +762,7 @@ PassRefPtr<StringImpl> StringImpl::replace(UChar pattern, StringImpl* replacemen
 
     ASSERT(dstOffset + srcSegmentLength == static_cast<int>(newImpl->length()));
 
-    return newImpl;
+    return newImpl.release();
 }
 
 PassRefPtr<StringImpl> StringImpl::replace(StringImpl* pattern, StringImpl* replacement)
@@ -782,7 +798,7 @@ PassRefPtr<StringImpl> StringImpl::replace(StringImpl* pattern, StringImpl* repl
     newSize += matchCount * repStrLength;
 
     UChar* data;
-    PassRefPtr<StringImpl> newImpl = createUninitialized(newSize, data);
+    RefPtr<StringImpl> newImpl = createUninitialized(newSize, data);
     
     // Construct the new data
     int srcSegmentEnd;
@@ -804,7 +820,7 @@ PassRefPtr<StringImpl> StringImpl::replace(StringImpl* pattern, StringImpl* repl
 
     ASSERT(dstOffset + srcSegmentLength == static_cast<int>(newImpl->length()));
 
-    return newImpl;
+    return newImpl.release();
 }
 
 bool equal(const StringImpl* a, const StringImpl* b)
@@ -952,9 +968,11 @@ int StringImpl::wordCount(int maxWordsToCount)
 PassRefPtr<StringImpl> StringImpl::createWithTerminatingNullCharacter(const StringImpl& string)
 {
     // Use createUninitialized instead of 'new StringImpl' so that the string and its buffer
-    // get allocated in a single malloc block.
+    // get allocated in a single memory block.
     UChar* data;
-    int length = string.m_length;
+    unsigned length = string.m_length;
+    if (length >= numeric_limits<unsigned>::max())
+        CRASH();
     RefPtr<StringImpl> terminatedString = createUninitialized(length + 1, data);
     memcpy(data, string.m_data, length * sizeof(UChar));
     data[length] = 0;
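The many PassRefPtr-to-RefPtr conversions in this file all apply the same WebKit ownership idiom: a PassRefPtr relinquishes its reference when it is copied from, so a local of that type is unsafe to touch again (foldCase() above reassigns and rereads newImpl on its slow path). Holding the result in a RefPtr and calling release() only at the return hands the reference to the caller exactly once. A minimal sketch of the shape; makeCopy is a hypothetical stand-in for the factory functions in this file:

    PassRefPtr<StringImpl> makeCopy(const StringImpl& source)
    {
        UChar* data;
        RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(source.length(), data);
        memcpy(data, source.characters(), source.length() * sizeof(UChar));
        return newImpl.release();   // transfer ownership to the caller exactly once
    }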
index 0a023826f44e07098ea6cde30a6245c4a7d9b47b..4354cabaf2ba3736a188e2110fa7ce74e6ff69ae 100644 (file)
@@ -171,11 +171,15 @@ public:
             return empty();
         }
 
-        if (length > ((std::numeric_limits<size_t>::max() - sizeof(StringImpl)) / sizeof(UChar)))
+        if (length > ((std::numeric_limits<unsigned>::max() - sizeof(StringImpl)) / sizeof(UChar))) {
+            output = 0;
             return 0;
+        }
         StringImpl* resultImpl;
-        if (!tryFastMalloc(sizeof(UChar) * length + sizeof(StringImpl)).getValue(resultImpl))
+        if (!tryFastMalloc(sizeof(UChar) * length + sizeof(StringImpl)).getValue(resultImpl)) {
+            output = 0;
             return 0;
+        }
         output = reinterpret_cast<UChar*>(resultImpl + 1);
         return adoptRef(new(resultImpl) StringImpl(length));
     }
@@ -189,6 +193,8 @@ public:
     {
         if (size_t size = vector.size()) {
             ASSERT(vector.data());
+            if (size > std::numeric_limits<unsigned>::max())
+                CRASH();
             return adoptRef(new StringImpl(vector.releaseBuffer(), size));
         }
         return empty();
@@ -284,7 +290,7 @@ public:
 
     PassRefPtr<StringImpl> lower();
     PassRefPtr<StringImpl> upper();
-    PassRefPtr<StringImpl> secure(UChar aChar, bool last = true);
+    PassRefPtr<StringImpl> secure(UChar, bool hideLastCharacter = true);
     PassRefPtr<StringImpl> foldCase();
 
     PassRefPtr<StringImpl> stripWhiteSpace();
index 842d755c8ee665539e320feaf86bfeb931a556ea..e73bac22c693bc0384ab70865e4c0648820f480a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * (C) 1999 Lars Knoll (knoll@kde.org)
- * Copyright (C) 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010 Apple Inc. All rights reserved.
  * Copyright (C) 2007-2009 Torch Mobile, Inc.
  *
  * This library is free software; you can redistribute it and/or
@@ -22,7 +22,6 @@
 #include "config.h"
 #include "WTFString.h"
 
-#include <limits>
 #include <stdarg.h>
 #include <wtf/ASCIICType.h>
 #include <wtf/text/CString.h>
@@ -34,6 +33,7 @@
 
 using namespace WTF;
 using namespace WTF::Unicode;
+using namespace std;
 
 namespace WebCore {
 
@@ -42,9 +42,12 @@ String::String(const UChar* str)
     if (!str)
         return;
         
-    int len = 0;
+    size_t len = 0;
     while (str[len] != UChar(0))
         len++;
+
+    if (len > numeric_limits<unsigned>::max())
+        CRASH();
     
     m_impl = StringImpl::create(str, len);
 }
@@ -61,8 +64,9 @@ void String::append(const String& str)
     if (str.m_impl) {
         if (m_impl) {
             UChar* data;
-            RefPtr<StringImpl> newImpl =
-                StringImpl::createUninitialized(m_impl->length() + str.length(), data);
+            if (str.length() > numeric_limits<unsigned>::max() - m_impl->length())
+                CRASH();
+            RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + str.length(), data);
             memcpy(data, m_impl->characters(), m_impl->length() * sizeof(UChar));
             memcpy(data + m_impl->length(), str.characters(), str.length() * sizeof(UChar));
             m_impl = newImpl.release();
@@ -79,8 +83,9 @@ void String::append(char c)
     // call to fastMalloc every single time.
     if (m_impl) {
         UChar* data;
-        RefPtr<StringImpl> newImpl =
-            StringImpl::createUninitialized(m_impl->length() + 1, data);
+        if (m_impl->length() >= numeric_limits<unsigned>::max())
+            CRASH();
+        RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + 1, data);
         memcpy(data, m_impl->characters(), m_impl->length() * sizeof(UChar));
         data[m_impl->length()] = c;
         m_impl = newImpl.release();
@@ -96,8 +101,9 @@ void String::append(UChar c)
     // call to fastMalloc every single time.
     if (m_impl) {
         UChar* data;
-        RefPtr<StringImpl> newImpl =
-            StringImpl::createUninitialized(m_impl->length() + 1, data);
+        if (m_impl->length() >= numeric_limits<unsigned>::max())
+            CRASH();
+        RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + 1, data);
         memcpy(data, m_impl->characters(), m_impl->length() * sizeof(UChar));
         data[m_impl->length()] = c;
         m_impl = newImpl.release();
@@ -152,8 +158,9 @@ void String::append(const UChar* charactersToAppend, unsigned lengthToAppend)
 
     ASSERT(charactersToAppend);
     UChar* data;
-    RefPtr<StringImpl> newImpl =
-        StringImpl::createUninitialized(length() + lengthToAppend, data);
+    if (lengthToAppend > numeric_limits<unsigned>::max() - length())
+        CRASH();
+    RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() + lengthToAppend, data);
     memcpy(data, characters(), length() * sizeof(UChar));
     memcpy(data + length(), charactersToAppend, lengthToAppend * sizeof(UChar));
     m_impl = newImpl.release();
@@ -173,8 +180,9 @@ void String::insert(const UChar* charactersToInsert, unsigned lengthToInsert, un
 
     ASSERT(charactersToInsert);
     UChar* data;
-    RefPtr<StringImpl> newImpl =
-      StringImpl::createUninitialized(length() + lengthToInsert, data);
+    if (lengthToInsert > numeric_limits<unsigned>::max() - length())
+        CRASH();
+    RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() + lengthToInsert, data);
     memcpy(data, characters(), position * sizeof(UChar));
     memcpy(data + position, charactersToInsert, lengthToInsert * sizeof(UChar));
     memcpy(data + position + lengthToInsert, characters() + position, (length() - position) * sizeof(UChar));
@@ -207,8 +215,7 @@ void String::remove(unsigned position, int lengthToRemove)
     if (static_cast<unsigned>(lengthToRemove) > length() - position)
         lengthToRemove = length() - position;
     UChar* data;
-    RefPtr<StringImpl> newImpl =
-        StringImpl::createUninitialized(length() - lengthToRemove, data);
+    RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() - lengthToRemove, data);
     memcpy(data, characters(), position * sizeof(UChar));
     memcpy(data + position, characters() + position + lengthToRemove,
         (length() - lengthToRemove - position) * sizeof(UChar));
@@ -646,6 +653,8 @@ CString String::utf8() const
     //  * We could allocate a CStringBuffer with an appropriate size to
     //    have a good chance of being able to write the string into the
     //    buffer without reallocing (say, 1.5 x length).
+    if (length > numeric_limits<unsigned>::max() / 3)
+        return CString();
     Vector<char, 1024> bufferVector(length * 3);
 
     char* buffer = bufferVector.data();
@@ -670,6 +679,9 @@ CString String::utf8() const
 
 String String::fromUTF8(const char* stringStart, size_t length)
 {
+    if (length > numeric_limits<unsigned>::max())
+        CRASH();
+
     if (!stringStart)
         return String();
 
@@ -729,8 +741,8 @@ static bool isCharacterAllowedInBase(UChar c, int base)
 template <typename IntegralType>
 static inline IntegralType toIntegralType(const UChar* data, size_t length, bool* ok, int base)
 {
-    static const IntegralType integralMax = std::numeric_limits<IntegralType>::max();
-    static const bool isSigned = std::numeric_limits<IntegralType>::is_signed;
+    static const IntegralType integralMax = numeric_limits<IntegralType>::max();
+    static const bool isSigned = numeric_limits<IntegralType>::is_signed;
     const IntegralType maxMultiplier = integralMax / base;
 
     IntegralType value = 0;
index 7707cbabe040790e411165036785667b915eedc5..b954b1c6b9f91985d6b34fd63f853618665d017d 100644 (file)
@@ -30,6 +30,7 @@
 #include "JSGlobalData.h"
 #include "LinkBuffer.h"
 #include "MacroAssembler.h"
+#include "RegExpCache.h"
 #include "RegexCompiler.h"
 
 #include "pcre.h" // temporary, remove when fallback is removed.
@@ -1384,7 +1385,7 @@ public:
     {
         generate();
 
-        LinkBuffer patchBuffer(this, globalData->executableAllocator.poolForSize(size()));
+        LinkBuffer patchBuffer(this, globalData->regexAllocator.poolForSize(size()), 0);
 
         for (unsigned i = 0; i < m_backtrackRecords.size(); ++i)
             patchBuffer.patch(m_backtrackRecords[i].dataLabel, patchBuffer.locationOf(m_backtrackRecords[i].backtrackLocation));
@@ -1404,7 +1405,7 @@ void jitCompileRegex(JSGlobalData* globalData, RegexCodeBlock& jitObject, const
         return;
     numSubpatterns = pattern.m_numSubpatterns;
 
-    if (!pattern.m_shouldFallBack && globalData->canUseJIT()) {
+    if (!pattern.m_shouldFallBack && globalData->canUseJIT() && RegExpCache::isCacheable(patternString)) {
         RegexGenerator generator(pattern);
         generator.compile(globalData, jitObject);
         return;