From: Apple Date: Sat, 7 May 2011 18:58:22 +0000 (+0000) Subject: JavaScriptCore-721.26.tar.gz X-Git-Tag: ios-43^0 X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/commitdiff_plain/b80e619319b1def83d1e8b4f84042b661be1be7f JavaScriptCore-721.26.tar.gz --- diff --git a/JavaScriptCore.exp b/JavaScriptCore.exp index 3f408c6..6b5f9b7 100644 --- a/JavaScriptCore.exp +++ b/JavaScriptCore.exp @@ -1,5 +1,3 @@ -__ZN7WebCore10StringImpl6createEPKcj -__ZN7WebCore12AtomicString11addSlowCaseEPNS_10StringImplE _JSCheckScriptSyntax _JSClassCreate _JSClassRelease @@ -90,7 +88,6 @@ _JSWeakObjectMapClear _JSWeakObjectMapCreate _JSWeakObjectMapGet _JSWeakObjectMapSet -_WebCoreWebThreadIsLockedOrDisabled _WTFLog _WTFLogVerbose _WTFReportArgumentAssertionFailure @@ -98,6 +95,7 @@ _WTFReportAssertionFailure _WTFReportAssertionFailureWithMessage _WTFReportError _WTFReportFatalError +_WebCoreWebThreadIsLockedOrDisabled __Z12jsRegExpFreeP8JSRegExp __Z15jsRegExpCompilePKti24JSRegExpIgnoreCaseOption23JSRegExpMultilineOptionPjPPKc __Z15jsRegExpExecutePK8JSRegExpPKtiiPii @@ -193,6 +191,7 @@ __ZN3JSC20MarkedArgumentBuffer10slowAppendENS_7JSValueE __ZN3JSC23AbstractSamplingCounter4dumpEv __ZN3JSC23objectProtoFuncToStringEPNS_9ExecStateEPNS_8JSObjectENS_7JSValueERKNS_7ArgListE __ZN3JSC23setUpStaticFunctionSlotEPNS_9ExecStateEPKNS_9HashEntryEPNS_8JSObjectERKNS_10IdentifierERNS_12PropertySlotE +__ZN3JSC24DynamicGlobalObjectScopeC1EPNS_9ExecStateEPNS_14JSGlobalObjectE __ZN3JSC24createStackOverflowErrorEPNS_9ExecStateE __ZN3JSC25evaluateInGlobalCallFrameERKNS_7UStringERNS_7JSValueEPNS_14JSGlobalObjectE __ZN3JSC35createInterruptedExecutionExceptionEPNS_12JSGlobalDataE @@ -393,6 +392,7 @@ __ZN7WebCore10StringImpl5lowerEv __ZN7WebCore10StringImpl5toIntEPb __ZN7WebCore10StringImpl5upperEv __ZN7WebCore10StringImpl6createEPKc +__ZN7WebCore10StringImpl6createEPKcj __ZN7WebCore10StringImpl6createEPKtj __ZN7WebCore10StringImpl6createEPKtjN3WTF10PassRefPtrINS3_21CrossThreadRefCountedINS3_16OwnFastMallocPtrIS1_EEEEEE __ZN7WebCore10StringImpl6secureEtb @@ -404,6 +404,7 @@ __ZN7WebCore10StringImpl8endsWithEPS0_b __ZN7WebCore10StringImpl9substringEjj __ZN7WebCore10StringImplD1Ev __ZN7WebCore11commentAtomE +__ZN7WebCore12AtomicString11addSlowCaseEPNS_10StringImplE __ZN7WebCore12AtomicString3addEPKc __ZN7WebCore12AtomicString3addEPKt __ZN7WebCore12AtomicString3addEPKtj @@ -546,3 +547,6 @@ __ZTVN3JSC8JSObjectE __ZTVN3JSC8JSStringE _jscore_fastmalloc_introspection _kJSClassDefinitionEmpty + +# iOS Methods +__ZN3JSC12JSGlobalData20sharedInstanceExistsEv diff --git a/assembler/ARMv7Assembler.cpp b/assembler/ARMv7Assembler.cpp new file mode 100644 index 0000000..7aa1f10 --- /dev/null +++ b/assembler/ARMv7Assembler.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2010 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) + +#include "ARMv7Assembler.h" + +namespace JSC { + +const int ARMv7Assembler::JumpSizes[] = { 0xffffffff, sizeof(uint16_t), sizeof(uint16_t), + 2 * sizeof(uint16_t), 2 * sizeof(uint16_t), 3 * sizeof(uint16_t), 5 * sizeof(uint16_t), 6 * sizeof(uint16_t) }; +const int ARMv7Assembler::JumpPaddingSizes[] = { 0, 5 * sizeof(uint16_t), 6 * sizeof(uint16_t), + 5 * sizeof(uint16_t), 6 * sizeof(uint16_t) }; + +} + +#endif diff --git a/assembler/ARMv7Assembler.h b/assembler/ARMv7Assembler.h index 2faa3a6..13ad3e0 100644 --- a/assembler/ARMv7Assembler.h +++ b/assembler/ARMv7Assembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2010 Apple Inc. All rights reserved. * Copyright (C) 2010 University of Szeged * * Redistribution and use in source and binary forms, with or without @@ -304,7 +304,7 @@ public: } if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) { - encoding.immediate = bytes.byte0; + encoding.immediate = bytes.byte1; encoding.pattern = 2; return ARMThumbImmediate(TypeEncoded, encoding); } @@ -440,12 +440,11 @@ private: }; struct { unsigned type : 2; - unsigned amount : 5; + unsigned amount : 6; }; } m_u; }; - class ARMv7Assembler { public: ~ARMv7Assembler() @@ -476,14 +475,45 @@ public: ConditionGT, ConditionLE, ConditionAL, - + ConditionCS = ConditionHS, ConditionCC = ConditionLO, } Condition; + enum JumpType { JumpFixed, JumpNoCondition, JumpCondition, JumpNoConditionFixedSize, JumpConditionFixedSize, JumpTypeCount }; + enum JumpLinkType { LinkInvalid, LinkJumpT1, LinkJumpT2, LinkJumpT3, + LinkJumpT4, LinkConditionalJumpT4, LinkBX, LinkConditionalBX, JumpLinkTypeCount }; + static const int JumpSizes[JumpLinkTypeCount]; + static const int JumpPaddingSizes[JumpTypeCount]; + class LinkRecord { + public: + LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition) + : m_from(from) + , m_to(to) + , m_type(type) + , m_linkType(LinkInvalid) + , m_condition(condition) + { + } + intptr_t from() const { return m_from; } + void setFrom(intptr_t from) { m_from = from; } + intptr_t to() const { return m_to; } + JumpType type() const { return m_type; } + JumpLinkType linkType() const { return m_linkType; } + void setLinkType(JumpLinkType linkType) { ASSERT(m_linkType == LinkInvalid); m_linkType = linkType; } + Condition condition() const { return m_condition; } + private: + intptr_t m_from : 31; + intptr_t m_to : 31; + JumpType m_type : 3; + JumpLinkType m_linkType : 4; + Condition m_condition : 16; + }; + class JmpSrc { friend class ARMv7Assembler; friend class ARMInstructionFormatter; + friend class LinkBuffer; public: JmpSrc() : m_offset(-1) @@ -491,17 +521,32 @@ public: } private: - JmpSrc(int offset) + 
JmpSrc(int offset, JumpType type) + : m_offset(offset) + , m_condition(0xffff) + , m_type(type) + { + ASSERT(m_type == JumpFixed || m_type == JumpNoCondition || m_type == JumpNoConditionFixedSize); + } + + JmpSrc(int offset, JumpType type, Condition condition) : m_offset(offset) + , m_condition(condition) + , m_type(type) { + ASSERT(m_type == JumpFixed || m_type == JumpCondition || m_type == JumpConditionFixedSize); } int m_offset; + Condition m_condition : 16; + JumpType m_type : 16; + }; class JmpDst { friend class ARMv7Assembler; friend class ARMInstructionFormatter; + friend class LinkBuffer; public: JmpDst() : m_offset(-1) @@ -525,17 +570,6 @@ public: private: - struct LinkRecord { - LinkRecord(intptr_t from, intptr_t to) - : from(from) - , to(to) - { - } - - intptr_t from; - intptr_t to; - }; - // ARMv7, Appx-A.6.3 bool BadReg(RegisterID reg) { @@ -597,6 +631,8 @@ private: } OpcodeID; typedef enum { + OP_B_T1 = 0xD000, + OP_B_T2 = 0xE000, OP_AND_reg_T2 = 0xEA00, OP_TST_reg_T2 = 0xEA10, OP_ORR_reg_T2 = 0xEA40, @@ -620,10 +656,11 @@ private: OP_VADD_T2 = 0xEE30, OP_VSUB_T2 = 0xEE30, OP_VDIV = 0xEE80, - OP_VCMP_T1 = 0xEEB0, + OP_VCMP = 0xEEB0, OP_VCVT_FPIVFP = 0xEEB0, OP_VMOV_IMM_T2 = 0xEEB0, OP_VMRS = 0xEEB0, + OP_B_T3a = 0xF000, OP_B_T4a = 0xF000, OP_AND_imm_T1 = 0xF000, OP_TST_imm = 0xF010, @@ -672,10 +709,11 @@ private: OP_VMOV_CtoSb = 0x0A10, OP_VMOV_StoCb = 0x0A10, OP_VMRSb = 0x0A10, - OP_VCMP_T1b = 0x0A40, + OP_VCMPb = 0x0A40, OP_VCVT_FPIVFPb = 0x0A40, OP_VSUB_T2b = 0x0A40, OP_NOP_T2b = 0x8000, + OP_B_T3b = 0x8000, OP_B_T4b = 0x9000, } OpcodeID2; @@ -712,7 +750,7 @@ private: | (ifThenElseConditionBit(condition, inst3if) << 2) | (ifThenElseConditionBit(condition, inst4if) << 1) | 1; - ASSERT((condition != ConditionAL) || (mask & (mask - 1))); + ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) @@ -720,26 +758,25 @@ private: int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | (ifThenElseConditionBit(condition, inst3if) << 2) | 2; - ASSERT((condition != ConditionAL) || (mask & (mask - 1))); + ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } uint8_t ifThenElse(Condition condition, bool inst2if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | 4; - ASSERT((condition != ConditionAL) || (mask & (mask - 1))); + ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } uint8_t ifThenElse(Condition condition) { int mask = 8; - ASSERT((condition != ConditionAL) || (mask & (mask - 1))); return (condition << 4) | mask; } public: - + void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { // Rd can only be SP if Rn is also SP. @@ -878,27 +915,33 @@ public: ASSERT(!BadReg(rm)); m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); } - + // Only allowed in IT (if then) block if last instruction. - JmpSrc b() + JmpSrc b(JumpType type) { m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b); - return JmpSrc(m_formatter.size()); + return JmpSrc(m_formatter.size(), type); } // Only allowed in IT (if then) block if last instruction. - JmpSrc blx(RegisterID rm) + JmpSrc blx(RegisterID rm, JumpType type) { ASSERT(rm != ARMRegisters::pc); m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8); - return JmpSrc(m_formatter.size()); + return JmpSrc(m_formatter.size(), type); } // Only allowed in IT (if then) block if last instruction. 
- JmpSrc bx(RegisterID rm) + JmpSrc bx(RegisterID rm, JumpType type, Condition condition) + { + m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0); + return JmpSrc(m_formatter.size(), type, condition); + } + + JmpSrc bx(RegisterID rm, JumpType type) { m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0); - return JmpSrc(m_formatter.size()); + return JmpSrc(m_formatter.size(), type); } void bkpt(uint8_t imm=0) @@ -1513,7 +1556,12 @@ public: void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm) { - m_formatter.vfpOp(OP_VCMP_T1, OP_VCMP_T1b, true, VFPOperand(4), rd, rm); + m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm); + } + + void vcmpz_F64(FPDoubleRegisterID rd) + { + m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0)); } void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm) @@ -1538,11 +1586,6 @@ public: m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm); } - void vmov_F64_0(FPDoubleRegisterID rd) - { - m_formatter.vfpOp(OP_VMOV_IMM_T2, OP_VMOV_IMM_T2b, true, VFPOperand(0), rd, VFPOperand(0)); - } - void vmov(RegisterID rd, FPSingleRegisterID rn) { ASSERT(!BadReg(rd)); @@ -1617,6 +1660,15 @@ public: { return dst.m_offset - src.m_offset; } + + int executableOffsetFor(int location) + { + if (!location) + return 0; + return static_cast(m_formatter.data())[location / sizeof(int32_t) - 1]; + } + + int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JumpPaddingSizes[jumpType] - JumpSizes[jumpLinkType]; } // Assembler admin methods: @@ -1625,22 +1677,125 @@ public: return m_formatter.size(); } - void* executableCopy(ExecutablePool* allocator) + static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b) { - void* copy = m_formatter.executableCopy(allocator); + return a.from() < b.from(); + } - unsigned jumpCount = m_jumpsToLink.size(); - for (unsigned i = 0; i < jumpCount; ++i) { - uint16_t* location = reinterpret_cast(reinterpret_cast(copy) + m_jumpsToLink[i].from); - uint16_t* target = reinterpret_cast(reinterpret_cast(copy) + m_jumpsToLink[i].to); - linkJumpAbsolute(location, target); + bool canCompact(JumpType jumpType) + { + // The following cannot be compacted: + // JumpFixed: represents custom jump sequence + // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size + // JumpConditionFixedSize: represents conditional jump that must remain a fixed size + return (jumpType == JumpNoCondition) || (jumpType == JumpCondition); + } + + JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) + { + if (jumpType == JumpFixed) + return LinkInvalid; + + // for patchable jump we must leave space for the longest code sequence + if (jumpType == JumpNoConditionFixedSize) + return LinkBX; + if (jumpType == JumpConditionFixedSize) + return LinkConditionalBX; + + const int paddingSize = JumpPaddingSizes[jumpType]; + bool mayTriggerErrata = false; + + if (jumpType == JumpCondition) { + // 2-byte conditional T1 + const uint16_t* jumpT1Location = reinterpret_cast(from - (paddingSize - JumpSizes[LinkJumpT1])); + if (canBeJumpT1(jumpT1Location, to)) + return LinkJumpT1; + // 4-byte conditional T3 + const uint16_t* jumpT3Location = reinterpret_cast(from - (paddingSize - JumpSizes[LinkJumpT3])); + if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) { + if (!mayTriggerErrata) + return LinkJumpT3; + } + // 4-byte conditional T4 with IT + const uint16_t* conditionalJumpT4Location = + reinterpret_cast(from - (paddingSize - 
JumpSizes[LinkConditionalJumpT4])); + if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) { + if (!mayTriggerErrata) + return LinkConditionalJumpT4; + } + } else { + // 2-byte unconditional T2 + const uint16_t* jumpT2Location = reinterpret_cast(from - (paddingSize - JumpSizes[LinkJumpT2])); + if (canBeJumpT2(jumpT2Location, to)) + return LinkJumpT2; + // 4-byte unconditional T4 + const uint16_t* jumpT4Location = reinterpret_cast(from - (paddingSize - JumpSizes[LinkJumpT4])); + if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) { + if (!mayTriggerErrata) + return LinkJumpT4; + } + // use long jump sequence + return LinkBX; + } + + ASSERT(jumpType == JumpCondition); + return LinkConditionalBX; + } + + JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) + { + JumpLinkType linkType = computeJumpType(record.type(), from, to); + record.setLinkType(linkType); + return linkType; + } + + void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) + { + int32_t ptr = regionStart / sizeof(int32_t); + const int32_t end = regionEnd / sizeof(int32_t); + int32_t* offsets = static_cast(m_formatter.data()); + while (ptr < end) + offsets[ptr++] = offset; + } + + Vector& jumpsToLink() + { + std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator); + return m_jumpsToLink; + } + + void link(LinkRecord& record, uint8_t* from, uint8_t* to) + { + switch (record.linkType()) { + case LinkJumpT1: + linkJumpT1(record.condition(), reinterpret_cast(from), to); + break; + case LinkJumpT2: + linkJumpT2(reinterpret_cast(from), to); + break; + case LinkJumpT3: + linkJumpT3(record.condition(), reinterpret_cast(from), to); + break; + case LinkJumpT4: + linkJumpT4(reinterpret_cast(from), to); + break; + case LinkConditionalJumpT4: + linkConditionalJumpT4(record.condition(), reinterpret_cast(from), to); + break; + case LinkConditionalBX: + linkConditionalBX(record.condition(), reinterpret_cast(from), to); + break; + case LinkBX: + linkBX(reinterpret_cast(from), to); + break; + default: + ASSERT_NOT_REACHED(); + break; } - m_jumpsToLink.clear(); - - ASSERT(copy); - return copy; } + void* unlinkedCode() { return m_formatter.data(); } + static unsigned getCallReturnOffset(JmpSrc call) { ASSERT(call.m_offset >= 0); @@ -1659,7 +1814,7 @@ public: { ASSERT(to.m_offset != -1); ASSERT(from.m_offset != -1); - m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset)); + m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, from.m_type, from.m_condition)); } static void linkJump(void* code, JmpSrc from, void* to) @@ -1702,8 +1857,6 @@ public: ASSERT(reinterpret_cast(to) & 1); setPointer(reinterpret_cast(from) - 1, to); - - ExecutableAllocator::cacheFlush(reinterpret_cast(from) - 5, 4 * sizeof(uint16_t)); } static void repatchInt32(void* where, int32_t value) @@ -1711,8 +1864,6 @@ public: ASSERT(!(reinterpret_cast(where) & 1)); setInt32(where, value); - - ExecutableAllocator::cacheFlush(reinterpret_cast(where) - 4, 4 * sizeof(uint16_t)); } static void repatchPointer(void* where, void* value) @@ -1720,8 +1871,6 @@ public: ASSERT(!(reinterpret_cast(where) & 1)); setPointer(where, value); - - ExecutableAllocator::cacheFlush(reinterpret_cast(where) - 4, 4 * sizeof(uint16_t)); } static void repatchLoadPtrToLEA(void* where) @@ -1862,19 +2011,38 @@ private: return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b); } - static void linkJumpAbsolute(uint16_t* instruction, void* target) + static bool canBeJumpT1(const uint16_t* 
instruction, const void* target)
     {
-        // FIXME: this should be up in the MacroAssembler layer. :-(
-        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
-        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
         ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
-
-        ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
-            || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) );
-
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T1 the branch displacement encoded in the instruction is 2
+        // less than the actual displacement.
+        relative -= 2;
+        return ((relative << 23) >> 23) == relative;
+    }
+
+    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T2 the branch displacement encoded in the instruction is 2
+        // less than the actual displacement.
+        relative -= 2;
+        return ((relative << 20) >> 20) == relative;
+    }
+
+    static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
         intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
-
         // From Cortex-A8 errata:
         // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
         // the target of the branch falls within the first region it is
         // possible for the processor to incorrectly determine the branch
         // instruction, and it is also possible in some cases for the processor
         // to enter a deadlock state.
         // The instruction is spanning two pages if it ends at an address ending 0x002
         bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
+        mayTriggerErrata = spansTwo4K;
         // The target is in the first page if the jump branch back by [3..0x1002] bytes
         bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
         bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
-
-        if (((relative << 7) >> 7) == relative && !wouldTriggerA8Errata) {
-            // ARM encoding for the top two bits below the sign bit is 'peculiar'.
-            if (relative >= 0)
-                relative ^= 0xC00000;
-
-            // All branch offsets should be an even distance.
-            ASSERT(!(relative & 1));
+        return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
+    }
+
+    static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // From Cortex-A8 errata:
+        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
+        // the target of the branch falls within the first region it is
+        // possible for the processor to incorrectly determine the branch
+        // instruction, and it is also possible in some cases for the processor
+        // to enter a deadlock state.
+        // The instruction is spanning two pages if it ends at an address ending 0x002
+        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
+        mayTriggerErrata = spansTwo4K;
+        // The target is in the first page if the jump branch back by [3..0x1002] bytes
+        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
+        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
+        return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
+    }
+
+    void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT1(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T1 the branch displacement encoded in the instruction is 2
+        // less than the actual displacement.
+        relative -= 2;
+
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+    }
+
+    static void linkJumpT2(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT2(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T2 the branch displacement encoded in the instruction is 2
+        // less than the actual displacement.
+        relative -= 2;
+
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+    }
+
+    void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        bool scratch;
+        UNUSED_PARAM(scratch);
+        ASSERT(canBeJumpT3(instruction, target, scratch));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+        instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+    }
+
+    static void linkJumpT4(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        bool scratch;
+        UNUSED_PARAM(scratch);
+        ASSERT(canBeJumpT4(instruction, target, scratch));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+        if (relative >= 0)
+            relative ^= 0xC00000;
+
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+    }
+
+    void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        instruction[-3] = ifThenElse(cond) | OP_IT;
+        linkJumpT4(instruction, target);
+    }
+
+    static void linkBX(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+    }
+
+    void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        linkBX(instruction, target);
+        instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+    }
+
+    static void linkJumpAbsolute(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+            || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+
+        bool scratch;
+        if (canBeJumpT4(instruction, target, scratch)) {
             // There may be a better way to fix this, but right now put the NOPs first, since in the
             // case of a conditional branch this will be coming after an ITTT predicating *three*
             // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
@@ -1902,9 +2207,9 @@ private:
             instruction[-5] = OP_NOP_T1;
             instruction[-4] = OP_NOP_T2a;
             instruction[-3] = OP_NOP_T2b;
-            instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
-            instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+            linkJumpT4(instruction, target);
         } else {
+            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
             ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
             ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
             instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
@@ -1914,11 +2219,12 @@
             instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
         }
     }
-
+
     static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
     {
         return op | (imm.m_value.i << 10) | imm.m_value.imm4;
     }
+
     static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
     {
         return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
@@ -2035,6 +2341,7 @@
     } m_formatter;
 
     Vector<LinkRecord> m_jumpsToLink;
+    Vector<int32_t> m_offsets;
 };
 
 } // namespace JSC

diff --git a/assembler/AbstractMacroAssembler.h b/assembler/AbstractMacroAssembler.h
index aab9089..5db2cb9 100644
--- a/assembler/AbstractMacroAssembler.h
+++ b/assembler/AbstractMacroAssembler.h
@@ -418,12 +418,6 @@ public:
 
     // Section 3: Misc admin methods
-
-    static CodePtr trampolineAt(CodeRef ref, Label label)
-    {
-        return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
-    }
-
     size_t size()
     {
         return m_assembler.size();
@@ -479,6 +473,9 @@ public:
     {
         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
     }
+
+    void beginUninterruptedSequence() { }
+    void endUninterruptedSequence() { }
 
 protected:
     AssemblerType m_assembler;

diff --git a/assembler/LinkBuffer.h b/assembler/LinkBuffer.h
index 47cac5a..ae58946 100644
--- a/assembler/LinkBuffer.h
+++ b/assembler/LinkBuffer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -49,25 +49,34 @@ namespace JSC {
 //
 class LinkBuffer : public Noncopyable {
     typedef MacroAssemblerCodeRef CodeRef;
+    typedef MacroAssemblerCodePtr CodePtr;
     typedef MacroAssembler::Label Label;
     typedef MacroAssembler::Jump Jump;
     typedef MacroAssembler::JumpList JumpList;
     typedef MacroAssembler::Call Call;
     typedef MacroAssembler::DataLabel32 DataLabel32;
     typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+    typedef MacroAssembler::JmpDst JmpDst;
+#if ENABLE(BRANCH_COMPACTION)
+    typedef MacroAssembler::LinkRecord LinkRecord;
+    typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
 
 public:
     // Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
     //       First, executablePool is copied into m_executablePool, then the initialization of
     //       m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
- LinkBuffer(MacroAssembler* masm, PassRefPtr executablePool) + // The linkOffset parameter should only be non-null when recompiling for exception info + LinkBuffer(MacroAssembler* masm, PassRefPtr executablePool, void* linkOffset) : m_executablePool(executablePool) - , m_code(masm->m_assembler.executableCopy(m_executablePool.get())) - , m_size(masm->m_assembler.size()) + , m_size(0) + , m_code(0) + , m_assembler(masm) #ifndef NDEBUG , m_completed(false) #endif { + linkCode(linkOffset); } ~LinkBuffer() @@ -80,28 +89,32 @@ public: void link(Call call, FunctionPtr function) { ASSERT(call.isFlagSet(Call::Linkable)); + call.m_jmp = applyOffset(call.m_jmp); MacroAssembler::linkCall(code(), call, function); } void link(Jump jump, CodeLocationLabel label) { + jump.m_jmp = applyOffset(jump.m_jmp); MacroAssembler::linkJump(code(), jump, label); } void link(JumpList list, CodeLocationLabel label) { for (unsigned i = 0; i < list.m_jumps.size(); ++i) - MacroAssembler::linkJump(code(), list.m_jumps[i], label); + link(list.m_jumps[i], label); } void patch(DataLabelPtr label, void* value) { - MacroAssembler::linkPointer(code(), label.m_label, value); + JmpDst target = applyOffset(label.m_label); + MacroAssembler::linkPointer(code(), target, value); } void patch(DataLabelPtr label, CodeLocationLabel value) { - MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress()); + JmpDst target = applyOffset(label.m_label); + MacroAssembler::linkPointer(code(), target, value.executableAddress()); } // These methods are used to obtain handles to allow the code to be relinked / repatched later. @@ -110,35 +123,36 @@ public: { ASSERT(call.isFlagSet(Call::Linkable)); ASSERT(!call.isFlagSet(Call::Near)); - return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp)); + return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp))); } CodeLocationNearCall locationOfNearCall(Call call) { ASSERT(call.isFlagSet(Call::Linkable)); ASSERT(call.isFlagSet(Call::Near)); - return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp)); + return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp))); } CodeLocationLabel locationOf(Label label) { - return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label)); + return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); } CodeLocationDataLabelPtr locationOf(DataLabelPtr label) { - return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label)); + return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); } CodeLocationDataLabel32 locationOf(DataLabel32 label) { - return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label)); + return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); } // This method obtains the return address of the call, given as an offset from // the start of the code. 
unsigned returnAddressOffset(Call call) { + call.m_jmp = applyOffset(call.m_jmp); return MacroAssembler::getLinkerCallReturnOffset(call); } @@ -152,6 +166,7 @@ public: return CodeRef(m_code, m_executablePool, m_size); } + CodeLocationLabel finalizeCodeAddendum() { performFinalization(); @@ -159,7 +174,20 @@ public: return CodeLocationLabel(code()); } + CodePtr trampolineAt(Label label) + { + return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label))); + } + private: + template T applyOffset(T src) + { +#if ENABLE(BRANCH_COMPACTION) + src.m_offset -= m_assembler->executableOffsetFor(src.m_offset); +#endif + return src; + } + // Keep this private! - the underlying code should only be obtained externally via // finalizeCode() or finalizeCodeAddendum(). void* code() @@ -167,6 +195,77 @@ private: return m_code; } + void linkCode(void* linkOffset) + { + UNUSED_PARAM(linkOffset); + ASSERT(!m_code); +#if !ENABLE(BRANCH_COMPACTION) + m_code = m_assembler->m_assembler.executableCopy(m_executablePool.get()); + m_size = m_assembler->size(); +#else + size_t initialSize = m_assembler->size(); + m_code = (uint8_t*)m_executablePool->alloc(initialSize); + if (!m_code) + return; + ExecutableAllocator::makeWritable(m_code, m_assembler->size()); + uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode(); + uint8_t* outData = reinterpret_cast(m_code); + const uint8_t* linkBase = linkOffset ? reinterpret_cast(linkOffset) : outData; + int readPtr = 0; + int writePtr = 0; + Vector& jumpsToLink = m_assembler->jumpsToLink(); + unsigned jumpCount = jumpsToLink.size(); + for (unsigned i = 0; i < jumpCount; ++i) { + int offset = readPtr - writePtr; + ASSERT(!(offset & 1)); + + // Copy the instructions from the last jump to the current one. + size_t regionSize = jumpsToLink[i].from() - readPtr; + memcpy(outData + writePtr, inData + readPtr, regionSize); + m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset); + readPtr += regionSize; + writePtr += regionSize; + + // Calculate absolute address of the jump target, in the case of backwards + // branches we need to be precise, forward branches we are pessimistic + const uint8_t* target; + if (jumpsToLink[i].to() >= jumpsToLink[i].from()) + target = linkBase + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far + else + target = linkBase + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to()); + + JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], linkBase + writePtr, target); + // Compact branch if we can... 
+ if (m_assembler->canCompact(jumpsToLink[i].type())) { + // Step back in the write stream + int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType); + if (delta) { + writePtr -= delta; + m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr); + } + } + jumpsToLink[i].setFrom(writePtr); + } + // Copy everything after the last jump + memcpy(outData + writePtr, inData + readPtr, m_assembler->size() - readPtr); + m_assembler->recordLinkOffsets(readPtr, m_assembler->size(), readPtr - writePtr); + + // Actually link everything (don't link if we've be given a linkoffset as it's a + // waste of time: linkOffset is used for recompiling to get exception info) + if (!linkOffset) { + for (unsigned i = 0; i < jumpCount; ++i) { + uint8_t* location = outData + jumpsToLink[i].from(); + uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to()); + m_assembler->link(jumpsToLink[i], location, target); + } + } + + jumpsToLink.clear(); + m_size = writePtr + m_assembler->size() - readPtr; + m_executablePool->tryShrink(m_code, initialSize, m_size); +#endif + } + void performFinalization() { #ifndef NDEBUG @@ -179,8 +278,9 @@ private: } RefPtr m_executablePool; - void* m_code; size_t m_size; + void* m_code; + MacroAssembler* m_assembler; #ifndef NDEBUG bool m_completed; #endif diff --git a/assembler/MacroAssemblerARM.h b/assembler/MacroAssemblerARM.h index 2a053d4..1bbb0cc 100644 --- a/assembler/MacroAssemblerARM.h +++ b/assembler/MacroAssemblerARM.h @@ -907,10 +907,18 @@ public: failureCases.append(branchTest32(Zero, dest)); } - void zeroDouble(FPRegisterID srcDest) + Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch) { m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0)); - convertInt32ToDouble(ARMRegisters::S0, srcDest); + convertInt32ToDouble(ARMRegisters::S0, scratch); + return branchDouble(DoubleNotEqual, reg, scratch); + } + + Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch) + { + m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0)); + convertInt32ToDouble(ARMRegisters::S0, scratch); + return branchDouble(DoubleEqualOrUnordered, reg, scratch); } protected: diff --git a/assembler/MacroAssemblerARMv7.h b/assembler/MacroAssemblerARMv7.h index b9cc856..e3e928d 100644 --- a/assembler/MacroAssemblerARMv7.h +++ b/assembler/MacroAssemblerARMv7.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2010 Apple Inc. All rights reserved. 
* Copyright (C) 2010 University of Szeged * * Redistribution and use in source and binary forms, with or without @@ -45,6 +45,26 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler { inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); } public: + typedef ARMv7Assembler::LinkRecord LinkRecord; + typedef ARMv7Assembler::JumpType JumpType; + typedef ARMv7Assembler::JumpLinkType JumpLinkType; + + MacroAssemblerARMv7() + : m_inUninterruptedSequence(false) + { + } + + void beginUninterruptedSequence() { m_inUninterruptedSequence = true; } + void endUninterruptedSequence() { m_inUninterruptedSequence = false; } + Vector& jumpsToLink() { return m_assembler.jumpsToLink(); } + void* unlinkedCode() { return m_assembler.unlinkedCode(); } + bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); } + JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); } + JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); } + void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); } + int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); } + void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); } + struct ArmAddress { enum AddressType { HasOffset, @@ -651,7 +671,7 @@ public: Jump unordered = makeBranch(ARMv7Assembler::ConditionVS); Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE); unordered.link(this); - // We get here if either unordered, or equal. + // We get here if either unordered or equal. Jump result = makeJump(); notEqual.link(this); return result; @@ -682,9 +702,27 @@ public: failureCases.append(branchTest32(Zero, dest)); } - void zeroDouble(FPRegisterID dest) + Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID) { - m_assembler.vmov_F64_0(dest); + m_assembler.vcmpz_F64(reg); + m_assembler.vmrs(); + Jump unordered = makeBranch(ARMv7Assembler::ConditionVS); + Jump result = makeBranch(ARMv7Assembler::ConditionNE); + unordered.link(this); + return result; + } + + Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID) + { + m_assembler.vcmpz_F64(reg); + m_assembler.vmrs(); + Jump unordered = makeBranch(ARMv7Assembler::ConditionVS); + Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE); + unordered.link(this); + // We get here if either unordered or equal. 
+ Jump result = makeJump(); + notEqual.link(this); + return result; } // Stack manipulation operations: @@ -803,7 +841,7 @@ private: ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); if (armImm.isValid()) m_assembler.cmp(left, armImm); - if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid()) + else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid()) m_assembler.cmn(left, armImm); else { move(Imm32(imm), dataTempRegister); @@ -969,14 +1007,14 @@ public: void jump(RegisterID target) { - m_assembler.bx(target); + m_assembler.bx(target, ARMv7Assembler::JumpFixed); } // Address is a memory location containing the address to jump to void jump(Address address) { load32(address, dataTempRegister); - m_assembler.bx(dataTempRegister); + m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed); } @@ -1059,35 +1097,35 @@ public: void breakpoint() { - m_assembler.bkpt(); + m_assembler.bkpt(0); } Call nearCall() { moveFixedWidthEncoding(Imm32(0), dataTempRegister); - return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear); + return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::LinkableNear); } Call call() { moveFixedWidthEncoding(Imm32(0), dataTempRegister); - return Call(m_assembler.blx(dataTempRegister), Call::Linkable); + return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable); } Call call(RegisterID target) { - return Call(m_assembler.blx(target), Call::None); + return Call(m_assembler.blx(target, ARMv7Assembler::JumpFixed), Call::None); } Call call(Address address) { load32(address, dataTempRegister); - return Call(m_assembler.blx(dataTempRegister), Call::None); + return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::None); } void ret() { - m_assembler.bx(linkRegister); + m_assembler.bx(linkRegister, ARMv7Assembler::JumpFixed); } void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest) @@ -1187,7 +1225,7 @@ public: { // Like a normal call, but don't link. moveFixedWidthEncoding(Imm32(0), dataTempRegister); - return Call(m_assembler.bx(dataTempRegister), Call::Linkable); + return Call(m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable); } Call makeTailRecursiveCall(Jump oldJump) @@ -1196,19 +1234,29 @@ public: return tailRecursiveCall(); } + + int executableOffsetFor(int location) + { + return m_assembler.executableOffsetFor(location); + } protected: + bool inUninterruptedSequence() + { + return m_inUninterruptedSequence; + } + ARMv7Assembler::JmpSrc makeJump() { moveFixedWidthEncoding(Imm32(0), dataTempRegister); - return m_assembler.bx(dataTempRegister); + return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition); } ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond) { m_assembler.it(cond, true, true); moveFixedWidthEncoding(Imm32(0), dataTempRegister); - return m_assembler.bx(dataTempRegister); + return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? 
ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond); } ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); } ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); } @@ -1298,6 +1346,8 @@ private: { ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); } + + bool m_inUninterruptedSequence; }; } // namespace JSC diff --git a/assembler/MacroAssemblerX86Common.h b/assembler/MacroAssemblerX86Common.h index cb86da7..0731065 100644 --- a/assembler/MacroAssemblerX86Common.h +++ b/assembler/MacroAssemblerX86Common.h @@ -527,12 +527,19 @@ public: failureCases.append(m_assembler.jne()); } - void zeroDouble(FPRegisterID srcDest) + Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch) { ASSERT(isSSE2Present()); - m_assembler.xorpd_rr(srcDest, srcDest); + m_assembler.xorpd_rr(scratch, scratch); + return branchDouble(DoubleNotEqual, reg, scratch); } + Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch) + { + ASSERT(isSSE2Present()); + m_assembler.xorpd_rr(scratch, scratch); + return branchDouble(DoubleEqualOrUnordered, reg, scratch); + } // Stack manipulation operations: // diff --git a/bytecompiler/BytecodeGenerator.h b/bytecompiler/BytecodeGenerator.h index 0bd8184..3667198 100644 --- a/bytecompiler/BytecodeGenerator.h +++ b/bytecompiler/BytecodeGenerator.h @@ -524,9 +524,29 @@ namespace JSC { bool m_regeneratingForExceptionInfo; CodeBlock* m_codeBlockBeingRegeneratedFrom; - static const unsigned s_maxEmitNodeDepth = 5000; + static const unsigned s_maxEmitNodeDepth = 3000; + + friend class IncreaseEmitNodeDepth; }; + class IncreaseEmitNodeDepth { + public: + IncreaseEmitNodeDepth(BytecodeGenerator& generator, unsigned count = 1) + : m_generator(generator) + , m_count(count) + { + m_generator.m_emitNodeDepth += count; + } + + ~IncreaseEmitNodeDepth() + { + m_generator.m_emitNodeDepth -= m_count; + } + + private: + BytecodeGenerator& m_generator; + unsigned m_count; + }; } #endif // BytecodeGenerator_h diff --git a/bytecompiler/NodesCodegen.cpp b/bytecompiler/NodesCodegen.cpp index 2cb781f..a7455e4 100644 --- a/bytecompiler/NodesCodegen.cpp +++ b/bytecompiler/NodesCodegen.cpp @@ -830,6 +830,8 @@ RegisterID* BinaryOpNode::emitStrcat(BytecodeGenerator& generator, RegisterID* d ASSERT(isAdd()); ASSERT(resultDescriptor().definitelyIsString()); + IncreaseEmitNodeDepth stackGuard(generator, 3); + // Create a list of expressions for all the adds in the tree of nodes we can convert into // a string concatenation. The rightmost node (c) is added first. The rightmost node is // added first, and the leftmost child is never added, so the vector produced for the @@ -1515,6 +1517,8 @@ RegisterID* ForNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) RegisterID* ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + IncreaseEmitNodeDepth stackGuard(generator); + RefPtr scope = generator.newLabelScope(LabelScope::Loop); if (!m_lexpr->isLocation()) @@ -1864,6 +1868,8 @@ RegisterID* TryNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) // NOTE: The catch and finally blocks must be labeled explicitly, so the // optimizer knows they may be jumped to from anywhere. + IncreaseEmitNodeDepth stackGuard(generator); + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); RefPtr
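
A note on the one-line ARMThumbImmediate change in the ARMv7Assembler.h hunks above (encoding.immediate = bytes.byte1): Thumb-2 "modified immediate" pattern 2 encodes constants of the form 0xXX00XX00, so when byte1 == byte3 and byte0 | byte2 == 0, the repeated byte lives in byte1, and byte0 is necessarily zero. A minimal standalone sketch of that check, using a hypothetical helper rather than the WebKit class:

    // encodesAsPattern2 is a hypothetical stand-in for the ARMThumbImmediate
    // logic; it reports whether 'value' matches the 0xXX00XX00 shape and, if
    // so, which byte the encoding's immediate field must carry.
    #include <cassert>
    #include <cstdint>

    static bool encodesAsPattern2(uint32_t value, uint8_t& immediate)
    {
        uint8_t byte0 = value & 0xff;          // bits 0-7
        uint8_t byte1 = (value >> 8) & 0xff;   // bits 8-15
        uint8_t byte2 = (value >> 16) & 0xff;  // bits 16-23
        uint8_t byte3 = (value >> 24) & 0xff;  // bits 24-31
        if (value && (byte1 == byte3) && !(byte0 | byte2)) {
            immediate = byte1; // the fix: byte1 holds the repeated byte; byte0 is always zero here
            return true;
        }
        return false;
    }

    int main()
    {
        uint8_t imm;
        assert(encodesAsPattern2(0xAB00AB00u, imm) && imm == 0xAB);
        assert(!encodesAsPattern2(0x00AB00ABu, imm)); // that shape is pattern 1
        return 0;
    }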
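
The canBeJumpT1 through canBeJumpT4 predicates all reduce to the same question: does the displacement (after the encoding-specific adjustment) fit the signed immediate width of the chosen branch form? On a 32-bit intptr_t, the shift pairs in the patch correspond to 9-, 12-, 21- and 25-bit signed displacements for the B T1, T2, T3 and T4 encodings respectively. A self-contained restatement, as a hypothetical helper written with an explicit range test instead of the patch's shift round-trip:

    #include <cassert>
    #include <cstdint>

    // True if 'value' is representable as an N-bit two's-complement immediate.
    // The patch expresses the same predicate as ((value << s) >> s) == value,
    // where s = 32 - bits on a 32-bit intptr_t.
    static bool fitsSignedBits(int32_t value, int bits)
    {
        int32_t limit = int32_t(1) << (bits - 1);
        return value >= -limit && value < limit;
    }

    int main()
    {
        assert(fitsSignedBits(254, 9));        // within B T1's 9-bit displacement
        assert(!fitsSignedBits(4096, 9));      // too far for T1
        assert(fitsSignedBits(4096, 12));      // T2's 12-bit displacement still fits
        assert(fitsSignedBits(-0x100000, 21)); // lower edge of T3's range
        return 0;
    }

computeJumpType then simply tries the encodings from shortest to longest and falls back to the MOV/MOVT/BX sequence (LinkBX, or LinkConditionalBX with a leading IT) when nothing shorter fits or the erratum check vetoes the short form.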
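
The Cortex-A8 erratum test duplicated in canBeJumpT3 and canBeJumpT4 is worth isolating: a 32-bit Thumb-2 branch that spans two 4KiB pages and branches backwards into the first page can be mis-decoded or can deadlock the processor. A sketch of the predicate as a free function (hypothetical name; the patch computes it inline):

    #include <cstdint>

    // 'instruction' is the link location the patch passes around, i.e. the
    // address just past the branch; 'relative' is the displacement to the
    // target, exactly as in canBeJumpT3/canBeJumpT4.
    static bool mayTriggerCortexA8Errata(uintptr_t instruction, intptr_t relative)
    {
        // The instruction spans two pages if it ends at an address ending 0x002.
        bool spansTwo4K = (instruction & 0xfff) == 0x002;
        // The target is in the first page if the branch goes back by [3..0x1002] bytes.
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        return spansTwo4K && targetInFirstPage;
    }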
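
Finally, the new LinkBuffer::linkCode path under ENABLE(BRANCH_COMPACTION) is easier to follow as a standalone model. The sketch below uses hypothetical types, not the LinkBuffer API, and replaces the computeJumpType/jumpSizeDelta machinery with a precomputed shrinkBy per jump, but it keeps the essential shape: copy instruction regions through a read pointer and a write pointer, step the write pointer back whenever a jump accepts a shorter encoding, and record the running read/write delta so a label at unlinked offset x can later be translated to x - offsetAt[x], which is what executableOffsetFor/applyOffset do in the patch.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Jump {
        int from;     // offset just past the jump in the unlinked stream
        int shrinkBy; // bytes saved by the shorter encoding chosen for it
    };

    // Returns the compacted code; offsetAt[i] ends up holding readPtr - writePtr
    // for byte i, mirroring what recordLinkOffsets stores over the code buffer.
    static std::vector<uint8_t> compact(const std::vector<uint8_t>& in,
                                        const std::vector<Jump>& jumps, // sorted by 'from'
                                        std::vector<int>& offsetAt)
    {
        std::vector<uint8_t> out(in.size());
        offsetAt.assign(in.size(), 0);
        int readPtr = 0, writePtr = 0;
        for (const Jump& jump : jumps) {
            // Copy the instructions from the last jump to the current one.
            int offset = readPtr - writePtr;
            int regionSize = jump.from - readPtr;
            memcpy(out.data() + writePtr, in.data() + readPtr, regionSize);
            for (int i = readPtr; i < jump.from; ++i)
                offsetAt[i] = offset;
            readPtr += regionSize;
            writePtr += regionSize;
            if (jump.shrinkBy) {
                // Step back in the write stream: the jump gets a shorter form.
                writePtr -= jump.shrinkBy;
                for (int i = jump.from - jump.shrinkBy; i < readPtr; ++i)
                    offsetAt[i] = readPtr - writePtr;
            }
        }
        // Copy everything after the last jump.
        memcpy(out.data() + writePtr, in.data() + readPtr, in.size() - readPtr);
        for (size_t i = readPtr; i < in.size(); ++i)
            offsetAt[i] = readPtr - writePtr;
        out.resize(writePtr + (in.size() - readPtr));
        return out;
    }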