-__ZN7WebCore10StringImpl6createEPKcj
-__ZN7WebCore12AtomicString11addSlowCaseEPNS_10StringImplE
_JSCheckScriptSyntax
_JSClassCreate
_JSClassRelease
_JSWeakObjectMapCreate
_JSWeakObjectMapGet
_JSWeakObjectMapSet
-_WebCoreWebThreadIsLockedOrDisabled
_WTFLog
_WTFLogVerbose
_WTFReportArgumentAssertionFailure
_WTFReportAssertionFailureWithMessage
_WTFReportError
_WTFReportFatalError
+_WebCoreWebThreadIsLockedOrDisabled
__Z12jsRegExpFreeP8JSRegExp
__Z15jsRegExpCompilePKti24JSRegExpIgnoreCaseOption23JSRegExpMultilineOptionPjPPKc
__Z15jsRegExpExecutePK8JSRegExpPKtiiPii
__ZN3JSC23AbstractSamplingCounter4dumpEv
__ZN3JSC23objectProtoFuncToStringEPNS_9ExecStateEPNS_8JSObjectENS_7JSValueERKNS_7ArgListE
__ZN3JSC23setUpStaticFunctionSlotEPNS_9ExecStateEPKNS_9HashEntryEPNS_8JSObjectERKNS_10IdentifierERNS_12PropertySlotE
+__ZN3JSC24DynamicGlobalObjectScopeC1EPNS_9ExecStateEPNS_14JSGlobalObjectE
__ZN3JSC24createStackOverflowErrorEPNS_9ExecStateE
__ZN3JSC25evaluateInGlobalCallFrameERKNS_7UStringERNS_7JSValueEPNS_14JSGlobalObjectE
__ZN3JSC35createInterruptedExecutionExceptionEPNS_12JSGlobalDataE
__ZN7WebCore10StringImpl5toIntEPb
__ZN7WebCore10StringImpl5upperEv
__ZN7WebCore10StringImpl6createEPKc
+__ZN7WebCore10StringImpl6createEPKcj
__ZN7WebCore10StringImpl6createEPKtj
__ZN7WebCore10StringImpl6createEPKtjN3WTF10PassRefPtrINS3_21CrossThreadRefCountedINS3_16OwnFastMallocPtrIS1_EEEEEE
__ZN7WebCore10StringImpl6secureEtb
__ZN7WebCore10StringImpl9substringEjj
__ZN7WebCore10StringImplD1Ev
__ZN7WebCore11commentAtomE
+__ZN7WebCore12AtomicString11addSlowCaseEPNS_10StringImplE
__ZN7WebCore12AtomicString3addEPKc
__ZN7WebCore12AtomicString3addEPKt
__ZN7WebCore12AtomicString3addEPKtj
__ZTVN3JSC8JSStringE
_jscore_fastmalloc_introspection
_kJSClassDefinitionEmpty
+
+# iOS Methods
+__ZN3JSC12JSGlobalData20sharedInstanceExistsEv
--- /dev/null
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "ARMv7Assembler.h"
+
+namespace JSC {
+
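+// JumpSizes[] gives the size, in bytes, of each concrete jump sequence (indexed by
+// JumpLinkType; the LinkInvalid entry is a sentinel). JumpPaddingSizes[] gives the space
+// reserved for each requested JumpType before branch compaction shrinks it.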
+const int ARMv7Assembler::JumpSizes[] = { 0xffffffff, sizeof(uint16_t), sizeof(uint16_t),
+ 2 * sizeof(uint16_t), 2 * sizeof(uint16_t), 3 * sizeof(uint16_t), 5 * sizeof(uint16_t), 6 * sizeof(uint16_t) };
+const int ARMv7Assembler::JumpPaddingSizes[] = { 0, 5 * sizeof(uint16_t), 6 * sizeof(uint16_t),
+ 5 * sizeof(uint16_t), 6 * sizeof(uint16_t) };
+
+}
+
+#endif
/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
}
if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
- encoding.immediate = bytes.byte0;
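+            // Pattern 2 encodes a byte replicated into bytes 1 and 3 (0xXX00XX00), so the
+            // immediate must be taken from byte1; byte0 is zero on this path.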
+ encoding.immediate = bytes.byte1;
encoding.pattern = 2;
return ARMThumbImmediate(TypeEncoded, encoding);
}
};
struct {
unsigned type : 2;
- unsigned amount : 5;
+ unsigned amount : 6;
};
} m_u;
};
-
class ARMv7Assembler {
public:
~ARMv7Assembler()
ConditionGT,
ConditionLE,
ConditionAL,
-
+
ConditionCS = ConditionHS,
ConditionCC = ConditionLO,
} Condition;
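+    // JumpType records what kind of jump the macro assembler asked for; JumpLinkType records
+    // which concrete instruction sequence branch compaction chose to encode it with.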
+ enum JumpType { JumpFixed, JumpNoCondition, JumpCondition, JumpNoConditionFixedSize, JumpConditionFixedSize, JumpTypeCount };
+ enum JumpLinkType { LinkInvalid, LinkJumpT1, LinkJumpT2, LinkJumpT3,
+ LinkJumpT4, LinkConditionalJumpT4, LinkBX, LinkConditionalBX, JumpLinkTypeCount };
+ static const int JumpSizes[JumpLinkTypeCount];
+ static const int JumpPaddingSizes[JumpTypeCount];
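+    // Each pending jump is described by a LinkRecord: its source and target offsets in the
+    // unlinked code, the requested JumpType, and (once chosen) its JumpLinkType.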
+ class LinkRecord {
+ public:
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+ : m_from(from)
+ , m_to(to)
+ , m_type(type)
+ , m_linkType(LinkInvalid)
+ , m_condition(condition)
+ {
+ }
+ intptr_t from() const { return m_from; }
+ void setFrom(intptr_t from) { m_from = from; }
+ intptr_t to() const { return m_to; }
+ JumpType type() const { return m_type; }
+ JumpLinkType linkType() const { return m_linkType; }
+ void setLinkType(JumpLinkType linkType) { ASSERT(m_linkType == LinkInvalid); m_linkType = linkType; }
+ Condition condition() const { return m_condition; }
+ private:
+ intptr_t m_from : 31;
+ intptr_t m_to : 31;
+ JumpType m_type : 3;
+ JumpLinkType m_linkType : 4;
+ Condition m_condition : 16;
+ };
+
class JmpSrc {
friend class ARMv7Assembler;
friend class ARMInstructionFormatter;
+ friend class LinkBuffer;
public:
JmpSrc()
: m_offset(-1)
}
private:
- JmpSrc(int offset)
+ JmpSrc(int offset, JumpType type)
+ : m_offset(offset)
+ , m_condition(0xffff)
+ , m_type(type)
+ {
+ ASSERT(m_type == JumpFixed || m_type == JumpNoCondition || m_type == JumpNoConditionFixedSize);
+ }
+
+ JmpSrc(int offset, JumpType type, Condition condition)
: m_offset(offset)
+ , m_condition(condition)
+ , m_type(type)
{
+ ASSERT(m_type == JumpFixed || m_type == JumpCondition || m_type == JumpConditionFixedSize);
}
int m_offset;
+ Condition m_condition : 16;
+ JumpType m_type : 16;
+
};
class JmpDst {
friend class ARMv7Assembler;
friend class ARMInstructionFormatter;
+ friend class LinkBuffer;
public:
JmpDst()
: m_offset(-1)
private:
- struct LinkRecord {
- LinkRecord(intptr_t from, intptr_t to)
- : from(from)
- , to(to)
- {
- }
-
- intptr_t from;
- intptr_t to;
- };
-
// ARMv7, Appx-A.6.3
bool BadReg(RegisterID reg)
{
} OpcodeID;
typedef enum {
+ OP_B_T1 = 0xD000,
+ OP_B_T2 = 0xE000,
OP_AND_reg_T2 = 0xEA00,
OP_TST_reg_T2 = 0xEA10,
OP_ORR_reg_T2 = 0xEA40,
OP_VADD_T2 = 0xEE30,
OP_VSUB_T2 = 0xEE30,
OP_VDIV = 0xEE80,
- OP_VCMP_T1 = 0xEEB0,
+ OP_VCMP = 0xEEB0,
OP_VCVT_FPIVFP = 0xEEB0,
OP_VMOV_IMM_T2 = 0xEEB0,
OP_VMRS = 0xEEB0,
+ OP_B_T3a = 0xF000,
OP_B_T4a = 0xF000,
OP_AND_imm_T1 = 0xF000,
OP_TST_imm = 0xF010,
OP_VMOV_CtoSb = 0x0A10,
OP_VMOV_StoCb = 0x0A10,
OP_VMRSb = 0x0A10,
- OP_VCMP_T1b = 0x0A40,
+ OP_VCMPb = 0x0A40,
OP_VCVT_FPIVFPb = 0x0A40,
OP_VSUB_T2b = 0x0A40,
OP_NOP_T2b = 0x8000,
+ OP_B_T3b = 0x8000,
OP_B_T4b = 0x9000,
} OpcodeID2;
| (ifThenElseConditionBit(condition, inst3if) << 2)
| (ifThenElseConditionBit(condition, inst4if) << 1)
| 1;
- ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
return (condition << 4) | mask;
}
uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| (ifThenElseConditionBit(condition, inst3if) << 2)
| 2;
- ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
return (condition << 4) | mask;
}
uint8_t ifThenElse(Condition condition, bool inst2if)
{
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| 4;
- ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
return (condition << 4) | mask;
}
uint8_t ifThenElse(Condition condition)
{
int mask = 8;
- ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
return (condition << 4) | mask;
}
public:
-
+
void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
// Rd can only be SP if Rn is also SP.
ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}
-
+
// Only allowed in IT (if then) block if last instruction.
- JmpSrc b()
+ JmpSrc b(JumpType type)
{
m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
- return JmpSrc(m_formatter.size());
+ return JmpSrc(m_formatter.size(), type);
}
// Only allowed in IT (if then) block if last instruction.
- JmpSrc blx(RegisterID rm)
+ JmpSrc blx(RegisterID rm, JumpType type)
{
ASSERT(rm != ARMRegisters::pc);
m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
- return JmpSrc(m_formatter.size());
+ return JmpSrc(m_formatter.size(), type);
}
// Only allowed in IT (if then) block if last instruction.
- JmpSrc bx(RegisterID rm)
+ JmpSrc bx(RegisterID rm, JumpType type, Condition condition)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return JmpSrc(m_formatter.size(), type, condition);
+ }
+
+ JmpSrc bx(RegisterID rm, JumpType type)
{
m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
- return JmpSrc(m_formatter.size());
+ return JmpSrc(m_formatter.size(), type);
}
void bkpt(uint8_t imm=0)
void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
{
- m_formatter.vfpOp(OP_VCMP_T1, OP_VCMP_T1b, true, VFPOperand(4), rd, rm);
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
+ }
+
+ void vcmpz_F64(FPDoubleRegisterID rd)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
}
void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm)
m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
}
- void vmov_F64_0(FPDoubleRegisterID rd)
- {
- m_formatter.vfpOp(OP_VMOV_IMM_T2, OP_VMOV_IMM_T2b, true, VFPOperand(0), rd, VFPOperand(0));
- }
-
void vmov(RegisterID rd, FPSingleRegisterID rn)
{
ASSERT(!BadReg(rd));
{
return dst.m_offset - src.m_offset;
}
+
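+    // Returns the number of bytes branch compaction has removed prior to 'location', as
+    // recorded into the unlinked instruction stream by recordLinkOffsets().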
+ int executableOffsetFor(int location)
+ {
+ if (!location)
+ return 0;
+ return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
+ }
+
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JumpPaddingSizes[jumpType] - JumpSizes[jumpLinkType]; }
// Assembler admin methods:
return m_formatter.size();
}
- void* executableCopy(ExecutablePool* allocator)
+ static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
{
- void* copy = m_formatter.executableCopy(allocator);
+ return a.from() < b.from();
+ }
- unsigned jumpCount = m_jumpsToLink.size();
- for (unsigned i = 0; i < jumpCount; ++i) {
- uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].from);
- uint16_t* target = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].to);
- linkJumpAbsolute(location, target);
+ bool canCompact(JumpType jumpType)
+ {
+ // The following cannot be compacted:
+ // JumpFixed: represents custom jump sequence
+ // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
+ // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
+ return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
+ }
+
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ {
+ if (jumpType == JumpFixed)
+ return LinkInvalid;
+
+        // For a patchable jump we must leave space for the longest code sequence.
+ if (jumpType == JumpNoConditionFixedSize)
+ return LinkBX;
+ if (jumpType == JumpConditionFixedSize)
+ return LinkConditionalBX;
+
+ const int paddingSize = JumpPaddingSizes[jumpType];
+ bool mayTriggerErrata = false;
+
+ if (jumpType == JumpCondition) {
+ // 2-byte conditional T1
+ const uint16_t* jumpT1Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT1]));
+ if (canBeJumpT1(jumpT1Location, to))
+ return LinkJumpT1;
+ // 4-byte conditional T3
+ const uint16_t* jumpT3Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT3]));
+ if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) {
+ if (!mayTriggerErrata)
+ return LinkJumpT3;
+ }
+ // 4-byte conditional T4 with IT
+ const uint16_t* conditionalJumpT4Location =
+ reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkConditionalJumpT4]));
+ if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) {
+ if (!mayTriggerErrata)
+ return LinkConditionalJumpT4;
+ }
+ } else {
+ // 2-byte unconditional T2
+ const uint16_t* jumpT2Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT2]));
+ if (canBeJumpT2(jumpT2Location, to))
+ return LinkJumpT2;
+ // 4-byte unconditional T4
+ const uint16_t* jumpT4Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT4]));
+ if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) {
+ if (!mayTriggerErrata)
+ return LinkJumpT4;
+ }
+ // use long jump sequence
+ return LinkBX;
+ }
+
+ ASSERT(jumpType == JumpCondition);
+ return LinkConditionalBX;
+ }
+
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ {
+ JumpLinkType linkType = computeJumpType(record.type(), from, to);
+ record.setLinkType(linkType);
+ return linkType;
+ }
+
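+    // Stores the compaction offset for a region of the unlinked code; the region has already
+    // been copied to the output buffer, so its words in the source buffer are reused to hold
+    // the offsets.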
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+ {
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+ }
+
+ Vector<LinkRecord>& jumpsToLink()
+ {
+ std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+ return m_jumpsToLink;
+ }
+
+ void link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ {
+ switch (record.linkType()) {
+ case LinkJumpT1:
+ linkJumpT1(record.condition(), reinterpret_cast<uint16_t*>(from), to);
+ break;
+ case LinkJumpT2:
+ linkJumpT2(reinterpret_cast<uint16_t*>(from), to);
+ break;
+ case LinkJumpT3:
+ linkJumpT3(record.condition(), reinterpret_cast<uint16_t*>(from), to);
+ break;
+ case LinkJumpT4:
+ linkJumpT4(reinterpret_cast<uint16_t*>(from), to);
+ break;
+ case LinkConditionalJumpT4:
+ linkConditionalJumpT4(record.condition(), reinterpret_cast<uint16_t*>(from), to);
+ break;
+ case LinkConditionalBX:
+ linkConditionalBX(record.condition(), reinterpret_cast<uint16_t*>(from), to);
+ break;
+ case LinkBX:
+ linkBX(reinterpret_cast<uint16_t*>(from), to);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ break;
}
- m_jumpsToLink.clear();
-
- ASSERT(copy);
- return copy;
}
+ void* unlinkedCode() { return m_formatter.data(); }
+
static unsigned getCallReturnOffset(JmpSrc call)
{
ASSERT(call.m_offset >= 0);
{
ASSERT(to.m_offset != -1);
ASSERT(from.m_offset != -1);
- m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset));
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, from.m_type, from.m_condition));
}
static void linkJump(void* code, JmpSrc from, void* to)
ASSERT(reinterpret_cast<intptr_t>(to) & 1);
setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
-
- ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
}
static void repatchInt32(void* where, int32_t value)
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
setInt32(where, value);
-
- ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
}
static void repatchPointer(void* where, void* value)
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
setPointer(where, value);
-
- ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
}
static void repatchLoadPtrToLEA(void* where)
return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
}
- static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ static bool canBeJumpT1(const uint16_t* instruction, const void* target)
{
- // FIMXE: this should be up in the MacroAssembler layer. :-(
- const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
-
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
-
- ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
- || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) );
-
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T1 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
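+        // OP_B_T1 encodes an 8-bit immediate shifted left by one: a signed 9-bit displacement.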
+ return ((relative << 23) >> 23) == relative;
+ }
+
+ static bool canBeJumpT2(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T2 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
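+        // OP_B_T2 encodes an 11-bit immediate shifted left by one: a signed 12-bit displacement.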
+ return ((relative << 20) >> 20) == relative;
+ }
+
+ static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
-
// From Cortex-A8 errata:
// If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
// the target of the branch falls within the first region it is
// to enter a deadlock state.
// The instruction is spanning two pages if it ends at an address ending 0x002
bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
+ mayTriggerErrata = spansTwo4K;
// The target is in the first page if the jump branch back by [3..0x1002] bytes
bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
-
- if (((relative << 7) >> 7) == relative && !wouldTriggerA8Errata) {
- // ARM encoding for the top two bits below the sign bit is 'peculiar'.
- if (relative >= 0)
- relative ^= 0xC00000;
-
- // All branch offsets should be an even distance.
- ASSERT(!(relative & 1));
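+        // OP_B_T3 encodes a 20-bit immediate shifted left by one: a signed 21-bit displacement.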
+ return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
+ }
+
+ static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // From Cortex-A8 errata:
+ // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
+ // the target of the branch falls within the first region it is
+ // possible for the processor to incorrectly determine the branch
+ // instruction, and it is also possible in some cases for the processor
+ // to enter a deadlock state.
+ // The instruction is spanning two pages if it ends at an address ending 0x002
+ bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
+ mayTriggerErrata = spansTwo4K;
+        // The target is in the first page if the jump branches back by [3..0x1002] bytes
+ bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
+ bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
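+        // OP_B_T4 encodes a 24-bit immediate shifted left by one: a signed 25-bit displacement.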
+ return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
+ }
+
+ void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT1(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T1 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+ }
+
+ static void linkJumpT2(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT2(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T2 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+ }
+
+ void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ bool scratch;
+ UNUSED_PARAM(scratch);
+ ASSERT(canBeJumpT3(instruction, target, scratch));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+ instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+ }
+
+ static void linkJumpT4(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ bool scratch;
+ UNUSED_PARAM(scratch);
+ ASSERT(canBeJumpT4(instruction, target, scratch));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ }
+
+ void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ instruction[-3] = ifThenElse(cond) | OP_IT;
+ linkJumpT4(instruction, target);
+ }
+
+ static void linkBX(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
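+        // Materialize the full 32-bit target in ip (the +1 sets the Thumb bit) and branch via BX.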
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+ instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ }
+
+ void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ linkBX(instruction, target);
+ instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+ }
+
+ static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+ || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+
+ bool scratch;
+ if (canBeJumpT4(instruction, target, scratch)) {
// There may be a better way to fix this, but right now put the NOPs first, since in the
// case of an conditional branch this will be coming after an ITTT predicating *three*
// instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
instruction[-5] = OP_NOP_T1;
instruction[-4] = OP_NOP_T2a;
instruction[-3] = OP_NOP_T2b;
- instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
- instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ linkJumpT4(instruction, target);
} else {
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
}
}
-
+
static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
{
return op | (imm.m_value.i << 10) | imm.m_value.imm4;
}
+
static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
{
return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
} m_formatter;
Vector<LinkRecord> m_jumpsToLink;
+ Vector<int32_t> m_offsets;
};
} // namespace JSC
// Section 3: Misc admin methods
-
- static CodePtr trampolineAt(CodeRef ref, Label label)
- {
- return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
- }
-
size_t size()
{
return m_assembler.size();
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
}
+
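+    // Default no-ops; assemblers that perform branch compaction define their own versions so
+    // that jumps emitted inside the sequence keep a fixed size.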
+ void beginUninterruptedSequence() { }
+ void endUninterruptedSequence() { }
protected:
AssemblerType m_assembler;
/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
//
class LinkBuffer : public Noncopyable {
typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
typedef MacroAssembler::Label Label;
typedef MacroAssembler::Jump Jump;
typedef MacroAssembler::JumpList JumpList;
typedef MacroAssembler::Call Call;
typedef MacroAssembler::DataLabel32 DataLabel32;
typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+ typedef MacroAssembler::JmpDst JmpDst;
+#if ENABLE(BRANCH_COMPACTION)
+ typedef MacroAssembler::LinkRecord LinkRecord;
+ typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
public:
// Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
// First, executablePool is copied into m_executablePool, then the initialization of
// m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
- LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
+ // The linkOffset parameter should only be non-null when recompiling for exception info
+ LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool, void* linkOffset)
: m_executablePool(executablePool)
- , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
- , m_size(masm->m_assembler.size())
+ , m_size(0)
+ , m_code(0)
+ , m_assembler(masm)
#ifndef NDEBUG
, m_completed(false)
#endif
{
+ linkCode(linkOffset);
}
~LinkBuffer()
void link(Call call, FunctionPtr function)
{
ASSERT(call.isFlagSet(Call::Linkable));
+ call.m_jmp = applyOffset(call.m_jmp);
MacroAssembler::linkCall(code(), call, function);
}
void link(Jump jump, CodeLocationLabel label)
{
+ jump.m_jmp = applyOffset(jump.m_jmp);
MacroAssembler::linkJump(code(), jump, label);
}
void link(JumpList list, CodeLocationLabel label)
{
for (unsigned i = 0; i < list.m_jumps.size(); ++i)
- MacroAssembler::linkJump(code(), list.m_jumps[i], label);
+ link(list.m_jumps[i], label);
}
void patch(DataLabelPtr label, void* value)
{
- MacroAssembler::linkPointer(code(), label.m_label, value);
+ JmpDst target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value);
}
void patch(DataLabelPtr label, CodeLocationLabel value)
{
- MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress());
+ JmpDst target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value.executableAddress());
}
// These methods are used to obtain handles to allow the code to be relinked / repatched later.
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(!call.isFlagSet(Call::Near));
- return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp)));
}
CodeLocationNearCall locationOfNearCall(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(call.isFlagSet(Call::Near));
- return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp)));
}
CodeLocationLabel locationOf(Label label)
{
- return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
{
- return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
CodeLocationDataLabel32 locationOf(DataLabel32 label)
{
- return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
// This method obtains the return address of the call, given as an offset from
// the start of the code.
unsigned returnAddressOffset(Call call)
{
+ call.m_jmp = applyOffset(call.m_jmp);
return MacroAssembler::getLinkerCallReturnOffset(call);
}
return CodeRef(m_code, m_executablePool, m_size);
}
+
CodeLocationLabel finalizeCodeAddendum()
{
performFinalization();
return CodeLocationLabel(code());
}
+ CodePtr trampolineAt(Label label)
+ {
+ return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
+ }
+
private:
+ template <typename T> T applyOffset(T src)
+ {
+#if ENABLE(BRANCH_COMPACTION)
+ src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
+#endif
+ return src;
+ }
+
// Keep this private! - the underlying code should only be obtained externally via
// finalizeCode() or finalizeCodeAddendum().
void* code()
return m_code;
}
+ void linkCode(void* linkOffset)
+ {
+ UNUSED_PARAM(linkOffset);
+ ASSERT(!m_code);
+#if !ENABLE(BRANCH_COMPACTION)
+ m_code = m_assembler->m_assembler.executableCopy(m_executablePool.get());
+ m_size = m_assembler->size();
+#else
+ size_t initialSize = m_assembler->size();
+ m_code = (uint8_t*)m_executablePool->alloc(initialSize);
+ if (!m_code)
+ return;
+ ExecutableAllocator::makeWritable(m_code, m_assembler->size());
+ uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
+ uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+ const uint8_t* linkBase = linkOffset ? reinterpret_cast<uint8_t*>(linkOffset) : outData;
+ int readPtr = 0;
+ int writePtr = 0;
+ Vector<LinkRecord>& jumpsToLink = m_assembler->jumpsToLink();
+ unsigned jumpCount = jumpsToLink.size();
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ int offset = readPtr - writePtr;
+ ASSERT(!(offset & 1));
+
+ // Copy the instructions from the last jump to the current one.
+ size_t regionSize = jumpsToLink[i].from() - readPtr;
+ memcpy(outData + writePtr, inData + readPtr, regionSize);
+ m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
+ readPtr += regionSize;
+ writePtr += regionSize;
+
+            // Calculate the absolute address of the jump target. For backwards branches we
+            // need to be precise; for forward branches we can be pessimistic.
+ const uint8_t* target;
+ if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+ target = linkBase + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+ else
+ target = linkBase + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+
+ JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], linkBase + writePtr, target);
+ // Compact branch if we can...
+ if (m_assembler->canCompact(jumpsToLink[i].type())) {
+ // Step back in the write stream
+ int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ if (delta) {
+ writePtr -= delta;
+ m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ }
+ }
+ jumpsToLink[i].setFrom(writePtr);
+ }
+ // Copy everything after the last jump
+ memcpy(outData + writePtr, inData + readPtr, m_assembler->size() - readPtr);
+ m_assembler->recordLinkOffsets(readPtr, m_assembler->size(), readPtr - writePtr);
+
+        // Actually link everything (don't link if we've been given a linkOffset, as it's a
+        // waste of time: linkOffset is used for recompiling to get exception info)
+ if (!linkOffset) {
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ uint8_t* location = outData + jumpsToLink[i].from();
+ uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+ m_assembler->link(jumpsToLink[i], location, target);
+ }
+ }
+
+ jumpsToLink.clear();
+ m_size = writePtr + m_assembler->size() - readPtr;
+ m_executablePool->tryShrink(m_code, initialSize, m_size);
+#endif
+ }
+
void performFinalization()
{
#ifndef NDEBUG
}
RefPtr<ExecutablePool> m_executablePool;
- void* m_code;
size_t m_size;
+ void* m_code;
+ MacroAssembler* m_assembler;
#ifndef NDEBUG
bool m_completed;
#endif
failureCases.append(branchTest32(Zero, dest));
}
- void zeroDouble(FPRegisterID srcDest)
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
{
m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
- convertInt32ToDouble(ARMRegisters::S0, srcDest);
+ convertInt32ToDouble(ARMRegisters::S0, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
+ convertInt32ToDouble(ARMRegisters::S0, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
}
protected:
/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
public:
+ typedef ARMv7Assembler::LinkRecord LinkRecord;
+ typedef ARMv7Assembler::JumpType JumpType;
+ typedef ARMv7Assembler::JumpLinkType JumpLinkType;
+
+ MacroAssemblerARMv7()
+ : m_inUninterruptedSequence(false)
+ {
+ }
+
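+    // While an uninterrupted sequence is open, jumps are emitted with the fixed-size
+    // (non-compactable) jump types so branch compaction cannot change the sequence's layout.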
+ void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
+ void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
+ Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+ void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+ bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
+ void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+
struct ArmAddress {
enum AddressType {
HasOffset,
Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
unordered.link(this);
- // We get here if either unordered, or equal.
+ // We get here if either unordered or equal.
Jump result = makeJump();
notEqual.link(this);
return result;
failureCases.append(branchTest32(Zero, dest));
}
- void zeroDouble(FPRegisterID dest)
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
{
- m_assembler.vmov_F64_0(dest);
+ m_assembler.vcmpz_F64(reg);
+ m_assembler.vmrs();
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+ {
+ m_assembler.vcmpz_F64(reg);
+ m_assembler.vmrs();
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = makeJump();
+ notEqual.link(this);
+ return result;
}
// Stack manipulation operations:
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
if (armImm.isValid())
m_assembler.cmp(left, armImm);
- if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
m_assembler.cmn(left, armImm);
else {
move(Imm32(imm), dataTempRegister);
void jump(RegisterID target)
{
- m_assembler.bx(target);
+ m_assembler.bx(target, ARMv7Assembler::JumpFixed);
}
// Address is a memory location containing the address to jump to
void jump(Address address)
{
load32(address, dataTempRegister);
- m_assembler.bx(dataTempRegister);
+ m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed);
}
void breakpoint()
{
- m_assembler.bkpt();
+ m_assembler.bkpt(0);
}
Call nearCall()
{
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+ return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::LinkableNear);
}
Call call()
{
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+ return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable);
}
Call call(RegisterID target)
{
- return Call(m_assembler.blx(target), Call::None);
+ return Call(m_assembler.blx(target, ARMv7Assembler::JumpFixed), Call::None);
}
Call call(Address address)
{
load32(address, dataTempRegister);
- return Call(m_assembler.blx(dataTempRegister), Call::None);
+ return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::None);
}
void ret()
{
- m_assembler.bx(linkRegister);
+ m_assembler.bx(linkRegister, ARMv7Assembler::JumpFixed);
}
void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
{
// Like a normal call, but don't link.
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+ return Call(m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable);
}
Call makeTailRecursiveCall(Jump oldJump)
return tailRecursiveCall();
}
+
+ int executableOffsetFor(int location)
+ {
+ return m_assembler.executableOffsetFor(location);
+ }
protected:
+ bool inUninterruptedSequence()
+ {
+ return m_inUninterruptedSequence;
+ }
+
ARMv7Assembler::JmpSrc makeJump()
{
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return m_assembler.bx(dataTempRegister);
+ return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
}
ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
{
m_assembler.it(cond, true, true);
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return m_assembler.bx(dataTempRegister);
+ return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
}
ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
{
ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
+
+ bool m_inUninterruptedSequence;
};
} // namespace JSC
failureCases.append(m_assembler.jne());
}
- void zeroDouble(FPRegisterID srcDest)
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
{
ASSERT(isSSE2Present());
- m_assembler.xorpd_rr(srcDest, srcDest);
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
}
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
// Stack manipulation operations:
//
bool m_regeneratingForExceptionInfo;
CodeBlock* m_codeBlockBeingRegeneratedFrom;
- static const unsigned s_maxEmitNodeDepth = 5000;
+ static const unsigned s_maxEmitNodeDepth = 3000;
+
+ friend class IncreaseEmitNodeDepth;
};
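+    // RAII helper: bumps the generator's emit-node depth on construction and restores it on
+    // destruction, used around recursive emitBytecode calls that can nest deeply.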
+ class IncreaseEmitNodeDepth {
+ public:
+ IncreaseEmitNodeDepth(BytecodeGenerator& generator, unsigned count = 1)
+ : m_generator(generator)
+ , m_count(count)
+ {
+ m_generator.m_emitNodeDepth += count;
+ }
+
+ ~IncreaseEmitNodeDepth()
+ {
+ m_generator.m_emitNodeDepth -= m_count;
+ }
+
+ private:
+ BytecodeGenerator& m_generator;
+ unsigned m_count;
+ };
}
#endif // BytecodeGenerator_h
ASSERT(isAdd());
ASSERT(resultDescriptor().definitelyIsString());
+ IncreaseEmitNodeDepth stackGuard(generator, 3);
+
// Create a list of expressions for all the adds in the tree of nodes we can convert into
// a string concatenation. The rightmost node (c) is added first. The rightmost node is
// added first, and the leftmost child is never added, so the vector produced for the
RegisterID* ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
{
+ IncreaseEmitNodeDepth stackGuard(generator);
+
RefPtr<LabelScope> scope = generator.newLabelScope(LabelScope::Loop);
if (!m_lexpr->isLocation())
// NOTE: The catch and finally blocks must be labeled explicitly, so the
// optimizer knows they may be jumped to from anywhere.
+ IncreaseEmitNodeDepth stackGuard(generator);
+
generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine());
RefPtr<Label> tryStartLabel = generator.newLabel();
continue;
ExecState* exec = function->scope().globalObject()->JSGlobalObject::globalExec();
- executable->recompile(exec);
+ executable->recompile();
if (function->scope().globalObject()->debugger() == this)
sourceProviders.add(executable->source().provider(), exec);
}
return poolAllocate(n);
}
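+    // Return the unused tail of the most recent allocation to the pool, provided that
+    // allocation is still the last one made from the current chunk.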
+ void tryShrink(void* allocation, size_t oldSize, size_t newSize)
+ {
+ if (static_cast<char*>(allocation) + oldSize != m_freePtr)
+ return;
+ m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
+ }
+
~ExecutablePool()
{
AllocationList::const_iterator end = m_pools.end();
size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
+ static bool underMemoryPressure();
+
private:
static Allocation systemAlloc(size_t n);
static void systemRelease(const Allocation& alloc);
namespace JSC {
-#if CPU(X86_64)
- // These limits suitable on 64-bit platforms (particularly x86-64, where we require all jumps to have a 2Gb max range).
- #define VM_POOL_SIZE (2u * 1024u * 1024u * 1024u) // 2Gb
- #define COALESCE_LIMIT (16u * 1024u * 1024u) // 16Mb
-#else
- // These limits are hopefully sensible on embedded platforms.
- #define VM_POOL_SIZE (32u * 1024u * 1024u) // 32Mb
- #define COALESCE_LIMIT (4u * 1024u * 1024u) // 4Mb
-#endif
+#define TwoPow(n) (1ull << n)
-// ASLR currently only works on darwin (due to arc4random) & 64-bit (due to address space size).
-#define VM_POOL_ASLR (OS(DARWIN) && CPU(X86_64))
-
-// FreeListEntry describes a free chunk of memory, stored in the freeList.
-struct FreeListEntry {
- FreeListEntry(void* pointer, size_t size)
- : pointer(pointer)
- , size(size)
- , nextEntry(0)
- , less(0)
- , greater(0)
- , balanceFactor(0)
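+// Describes an allocation request in whole blocks: the number of blocks needed and the
+// power-of-two alignment (in blocks) used when scanning the allocation bitmaps.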
+class AllocationTableSizeClass {
+public:
+ AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
+ : m_blockSize(blockSize)
{
+ ASSERT(blockSize == TwoPow(log2BlockSize));
+
+ // Calculate the number of blocks needed to hold size.
+ size_t blockMask = blockSize - 1;
+ m_blockCount = (size + blockMask) >> log2BlockSize;
+
+ // Align to the smallest power of two >= m_blockCount.
+ m_blockAlignment = 1;
+ while (m_blockAlignment < m_blockCount)
+ m_blockAlignment += m_blockAlignment;
}
- // All entries of the same size share a single entry
- // in the AVLTree, and are linked together in a linked
- // list, using nextEntry.
- void* pointer;
- size_t size;
- FreeListEntry* nextEntry;
+ size_t blockSize() const { return m_blockSize; }
+ size_t blockCount() const { return m_blockCount; }
+ size_t blockAlignment() const { return m_blockAlignment; }
- // These fields are used by AVLTree.
- FreeListEntry* less;
- FreeListEntry* greater;
- int balanceFactor;
-};
+ size_t size()
+ {
+ return m_blockSize * m_blockCount;
+ }
-// Abstractor class for use in AVLTree.
-// Nodes in the AVLTree are of type FreeListEntry, keyed on
-// (and thus sorted by) their size.
-struct AVLTreeAbstractorForFreeList {
- typedef FreeListEntry* handle;
- typedef int32_t size;
- typedef size_t key;
-
- handle get_less(handle h) { return h->less; }
- void set_less(handle h, handle lh) { h->less = lh; }
- handle get_greater(handle h) { return h->greater; }
- void set_greater(handle h, handle gh) { h->greater = gh; }
- int get_balance_factor(handle h) { return h->balanceFactor; }
- void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }
-
- static handle null() { return 0; }
-
- int compare_key_key(key va, key vb) { return va - vb; }
- int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
- int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
+private:
+ size_t m_blockSize;
+ size_t m_blockCount;
+ size_t m_blockAlignment;
};
-// Used to reverse sort an array of FreeListEntry pointers.
-static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
-{
- FreeListEntry* left = *(FreeListEntry**)leftPtr;
- FreeListEntry* right = *(FreeListEntry**)rightPtr;
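+// The lowest level of the allocation table: a single 64-bit bitmap tracking up to
+// 2^log2Entries page-sized subregions of one region.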
+template<unsigned log2Entries>
+class AllocationTableLeaf {
+ typedef uint64_t BitField;
- return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
-}
+public:
+ static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
+ static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
-// Used to reverse sort an array of pointers.
-static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
-{
- void* left = *(void**)leftPtr;
- void* right = *(void**)rightPtr;
+ static const size_t subregionSize = TwoPow(log2SubregionSize);
+ static const size_t regionSize = TwoPow(log2RegionSize);
+ static const unsigned entries = TwoPow(log2Entries);
+ COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);
- return (intptr_t)right - (intptr_t)left;
-}
+ AllocationTableLeaf()
+ : m_allocated(0)
+ {
+ }
-class FixedVMPoolAllocator
-{
- // The free list is stored in a sorted tree.
- typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;
+ ~AllocationTableLeaf()
+ {
+ ASSERT(isEmpty());
+ }
- // Use madvise as apropriate to prevent freed pages from being spilled,
- // and to attempt to ensure that used memory is reported correctly.
-#if HAVE(MADV_FREE_REUSE)
- void release(void* position, size_t size)
+ size_t allocate(AllocationTableSizeClass& sizeClass)
{
- while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+ ASSERT(sizeClass.blockSize() == subregionSize);
+ ASSERT(!isFull());
+
+ size_t alignment = sizeClass.blockAlignment();
+ size_t count = sizeClass.blockCount();
+ // Use this mask to check for spans of free blocks.
+ BitField mask = ((1ull << count) - 1) << (alignment - count);
+
+ // Step in units of alignment size.
+ for (unsigned i = 0; i < entries; i += alignment) {
+ if (!(m_allocated & mask)) {
+ m_allocated |= mask;
+ return (i + (alignment - count)) << log2SubregionSize;
+ }
+ mask <<= alignment;
+ }
+ return notFound;
}
- void reuse(void* position, size_t size)
+ void free(size_t location, AllocationTableSizeClass& sizeClass)
{
- while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+ ASSERT(sizeClass.blockSize() == subregionSize);
+
+ size_t entry = location >> log2SubregionSize;
+ size_t count = sizeClass.blockCount();
+ BitField mask = ((1ull << count) - 1) << entry;
+
+ ASSERT((m_allocated & mask) == mask);
+ m_allocated &= ~mask;
}
-#elif HAVE(MADV_FREE)
- void release(void* position, size_t size)
+
+ bool isEmpty()
{
- while (madvise(position, size, MADV_FREE) == -1 && errno == EAGAIN) { }
+ return !m_allocated;
}
-
- void reuse(void*, size_t) {}
-#elif HAVE(MADV_DONTNEED)
- void release(void* position, size_t size)
+
+ bool isFull()
{
- while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+ return !~m_allocated;
}
- void reuse(void*, size_t) {}
-#else
- void release(void*, size_t) {}
- void reuse(void*, size_t) {}
+ static size_t size()
+ {
+ return regionSize;
+ }
+
+ static AllocationTableSizeClass classForSize(size_t size)
+ {
+ return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
+ }
+
+#ifndef NDEBUG
+ void dump(size_t parentOffset = 0, unsigned indent = 0)
+ {
+ for (unsigned i = 0; i < indent; ++i)
+ fprintf(stderr, " ");
+ fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
+ }
#endif
- // All addition to the free list should go through this method, rather than
- // calling insert directly, to avoid multiple entries beging added with the
- // same key. All nodes being added should be singletons, they should not
- // already be a part of a chain.
- void addToFreeList(FreeListEntry* entry)
- {
- ASSERT(!entry->nextEntry);
-
- if (entry->size == m_commonSize) {
- m_commonSizedAllocations.append(entry->pointer);
- delete entry;
- } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
- // m_freeList already contain an entry for this size - insert this node into the chain.
- entry->nextEntry = entryInFreeList->nextEntry;
- entryInFreeList->nextEntry = entry;
- } else
- m_freeList.insert(entry);
- }
-
- // We do not attempt to coalesce addition, which may lead to fragmentation;
- // instead we periodically perform a sweep to try to coalesce neigboring
- // entries in m_freeList. Presently this is triggered at the point 16MB
- // of memory has been released.
- void coalesceFreeSpace()
- {
- Vector<FreeListEntry*> freeListEntries;
- SizeSortedFreeTree::Iterator iter;
- iter.start_iter_least(m_freeList);
-
- // Empty m_freeList into a Vector.
- for (FreeListEntry* entry; (entry = *iter); ++iter) {
- // Each entry in m_freeList might correspond to multiple
- // free chunks of memory (of the same size). Walk the chain
- // (this is likely of couse only be one entry long!) adding
- // each entry to the Vector (at reseting the next in chain
- // pointer to separate each node out).
- FreeListEntry* next;
- do {
- next = entry->nextEntry;
- entry->nextEntry = 0;
- freeListEntries.append(entry);
- } while ((entry = next));
+private:
+ BitField m_allocated;
+};
+
+
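+// Wraps a NextLevel table that is only created on first allocation and destroyed when it
+// becomes empty again, so unused regions of the table cost no memory.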
+template<class NextLevel>
+class LazyAllocationTable {
+public:
+ static const unsigned log2RegionSize = NextLevel::log2RegionSize;
+ static const unsigned entries = NextLevel::entries;
+
+ LazyAllocationTable()
+ : m_ptr(0)
+ {
+ }
+
+ ~LazyAllocationTable()
+ {
+ ASSERT(isEmpty());
+ }
+
+ size_t allocate(AllocationTableSizeClass& sizeClass)
+ {
+ if (!m_ptr)
+ m_ptr = new NextLevel();
+ return m_ptr->allocate(sizeClass);
+ }
+
+ void free(size_t location, AllocationTableSizeClass& sizeClass)
+ {
+ ASSERT(m_ptr);
+ m_ptr->free(location, sizeClass);
+ if (m_ptr->isEmpty()) {
+ delete m_ptr;
+ m_ptr = 0;
}
- // All entries are now in the Vector; purge the tree.
- m_freeList.purge();
-
- // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
- // We reverse-sort so that we can logically work forwards through memory,
- // whilst popping items off the end of the Vectors using last() and removeLast().
- qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
- qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);
-
- // The entries from m_commonSizedAllocations that cannot be
- // coalesced into larger chunks will be temporarily stored here.
- Vector<void*> newCommonSizedAllocations;
-
- // Keep processing so long as entries remain in either of the vectors.
- while (freeListEntries.size() || m_commonSizedAllocations.size()) {
- // We're going to try to find a FreeListEntry node that we can coalesce onto.
- FreeListEntry* coalescionEntry = 0;
-
- // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
- if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
- // Pop an item from the m_commonSizedAllocations vector - this is the lowest
- // addressed free chunk. Find out the begin and end addresses of the memory chunk.
- void* begin = m_commonSizedAllocations.last();
- void* end = (void*)((intptr_t)begin + m_commonSize);
- m_commonSizedAllocations.removeLast();
-
- // Try to find another free chunk abutting onto the end of the one we have already found.
- if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
- // There is an existing FreeListEntry for the next chunk of memory!
- // we can reuse this. Pop it off the end of m_freeList.
- coalescionEntry = freeListEntries.last();
- freeListEntries.removeLast();
- // Update the existing node to include the common-sized chunk that we also found.
- coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
- coalescionEntry->size += m_commonSize;
- } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
- // There is a second common-sized chunk that can be coalesced.
- // Allocate a new node.
- m_commonSizedAllocations.removeLast();
- coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
- } else {
- // Nope - this poor little guy is all on his own. :-(
- // Add him into the newCommonSizedAllocations vector for now, we're
- // going to end up adding him back into the m_commonSizedAllocations
- // list when we're done.
- newCommonSizedAllocations.append(begin);
+ }
+
+ bool isEmpty()
+ {
+ return !m_ptr;
+ }
+
+ bool isFull()
+ {
+ return m_ptr && m_ptr->isFull();
+ }
+
+ static size_t size()
+ {
+ return NextLevel::size();
+ }
+
+#ifndef NDEBUG
+ void dump(size_t parentOffset = 0, unsigned indent = 0)
+ {
+ ASSERT(m_ptr);
+ m_ptr->dump(parentOffset, indent);
+ }
+#endif
+
+ static AllocationTableSizeClass classForSize(size_t size)
+ {
+ return NextLevel::classForSize(size);
+ }
+
+private:
+ NextLevel* m_ptr;
+};
+
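+// An interior level of the allocation table: each of its entries covers one NextLevel
+// region, tracked by two bitmaps: one marking fully allocated entries and one marking
+// entries that contain suballocations.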
+template<class NextLevel, unsigned log2Entries>
+class AllocationTableDirectory {
+ typedef uint64_t BitField;
+
+public:
+ static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
+ static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
+
+ static const size_t subregionSize = TwoPow(log2SubregionSize);
+ static const size_t regionSize = TwoPow(log2RegionSize);
+ static const unsigned entries = TwoPow(log2Entries);
+ COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);
+
+ AllocationTableDirectory()
+ : m_full(0)
+ , m_hasSuballocation(0)
+ {
+ }
+
+ ~AllocationTableDirectory()
+ {
+ ASSERT(isEmpty());
+ }
+
+ size_t allocate(AllocationTableSizeClass& sizeClass)
+ {
+ ASSERT(sizeClass.blockSize() <= subregionSize);
+ ASSERT(!isFull());
+
+ if (sizeClass.blockSize() < subregionSize) {
+ BitField bit = 1;
+ for (unsigned i = 0; i < entries; ++i, bit += bit) {
+ if (m_full & bit)
continue;
+ size_t location = m_suballocations[i].allocate(sizeClass);
+ if (location != notFound) {
+ // If this didn't already have a subregion, it does now!
+ m_hasSuballocation |= bit;
+ // Mirror the suballocation's full bit.
+ if (m_suballocations[i].isFull())
+ m_full |= bit;
+ return (i * subregionSize) + location;
}
- } else {
- ASSERT(freeListEntries.size());
- ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
- // The lowest addressed item is from m_freeList; pop it from the Vector.
- coalescionEntry = freeListEntries.last();
- freeListEntries.removeLast();
}
-
- // Right, we have a FreeListEntry, we just need check if there is anything else
- // to coalesce onto the end.
- ASSERT(coalescionEntry);
- while (true) {
- // Calculate the end address of the chunk we have found so far.
- void* end = (void*)((intptr_t)coalescionEntry->pointer - coalescionEntry->size);
-
- // Is there another chunk adjacent to the one we already have?
- if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
- // Yes - another FreeListEntry -pop it from the list.
- FreeListEntry* coalescee = freeListEntries.last();
- freeListEntries.removeLast();
- // Add it's size onto our existing node.
- coalescionEntry->size += coalescee->size;
- delete coalescee;
- } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
- // We can coalesce the next common-sized chunk.
- m_commonSizedAllocations.removeLast();
- coalescionEntry->size += m_commonSize;
- } else
- break; // Nope, nothing to be added - stop here.
+ return notFound;
+ }
+
+ // A block counts as allocated if it is either fully allocated or contains suballocations.
+ BitField allocated = m_full | m_hasSuballocation;
+
+ size_t alignment = sizeClass.blockAlignment();
+ size_t count = sizeClass.blockCount();
+ // Use this mask to check for spans of free blocks.
+ BitField mask = ((1ull << count) - 1) << (alignment - count);
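+ // For example, count == 2 and alignment == 4 gives mask 0b1100: each candidate
+ // span sits at the top of an alignment-sized window of entries.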
+
+ // Step in units of alignment size.
+ for (unsigned i = 0; i < entries; i += alignment) {
+ if (!(allocated & mask)) {
+ m_full |= mask;
+ return (i + (alignment - count)) << log2SubregionSize;
}
+ mask <<= alignment;
+ }
+ return notFound;
+ }
- // We've coalesced everything we can onto the current chunk.
- // Add it back into m_freeList.
- addToFreeList(coalescionEntry);
+ void free(size_t location, AllocationTableSizeClass& sizeClass)
+ {
+ ASSERT(sizeClass.blockSize() <= subregionSize);
+
+ size_t entry = location >> log2SubregionSize;
+
+ if (sizeClass.blockSize() < subregionSize) {
+ BitField bit = 1ull << entry;
+ m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
+ // Check if the suballocation is now empty.
+ if (m_suballocations[entry].isEmpty())
+ m_hasSuballocation &= ~bit;
+ // No need to check; it clearly isn't full any more.
+ m_full &= ~bit;
+ } else {
+ size_t count = sizeClass.blockCount();
+ BitField mask = ((1ull << count) - 1) << entry;
+ ASSERT((m_full & mask) == mask);
+ ASSERT(!(m_hasSuballocation & mask));
+ m_full &= ~mask;
}
+ }
- // All chunks of free memory larger than m_commonSize should be
- // back in m_freeList by now. All that remains to be done is to
- // copy the contents on the newCommonSizedAllocations back into
- // the m_commonSizedAllocations Vector.
- ASSERT(m_commonSizedAllocations.size() == 0);
- m_commonSizedAllocations.append(newCommonSizedAllocations);
+ bool isEmpty()
+ {
+ return !(m_full | m_hasSuballocation);
}
-public:
+ bool isFull()
+ {
+ return !~m_full;
+ }
+
+ static size_t size()
+ {
+ return regionSize;
+ }
- FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
- : m_commonSize(commonSize)
- , m_countFreedSinceLastCoalesce(0)
- , m_totalHeapSize(totalHeapSize)
- {
- // Cook up an address to allocate at, using the following recipe:
- // 17 bits of zero, stay in userspace kids.
- // 26 bits of randomness for ASLR.
- // 21 bits of zero, at least stay aligned within one level of the pagetables.
- //
- // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
- // for now instead of 2^26 bits of ASLR lets stick with 25 bits of randomization plus
- // 2^24, which should put up somewhere in the middle of usespace (in the address range
- // 0x200000000000 .. 0x5fffffffffff).
- intptr_t randomLocation = 0;
-#if VM_POOL_ASLR
- randomLocation = arc4random() & ((1 << 25) - 1);
- randomLocation += (1 << 24);
- randomLocation <<= 21;
+ static AllocationTableSizeClass classForSize(size_t size)
+ {
+ if (size < subregionSize) {
+ AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
+ if (sizeClass.size() < NextLevel::size())
+ return sizeClass;
+ }
+ return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
+ }
+
+#ifndef NDEBUG
+ void dump(size_t parentOffset = 0, unsigned indent = 0)
+ {
+ for (unsigned i = 0; i < indent; ++i)
+ fprintf(stderr, " ");
+ fprintf(stderr, "%08x: [", (int)parentOffset);
+ for (unsigned i = 0; i < entries; ++i) {
+ BitField bit = 1ull << i;
+ char c = m_hasSuballocation & bit
+ ? (m_full & bit ? 'N' : 'n')
+ : (m_full & bit ? 'F' : '-');
+ fprintf(stderr, "%c", c);
+ }
+ fprintf(stderr, "]\n");
+
+ for (unsigned i = 0; i < entries; ++i) {
+ BitField bit = 1ull << i;
+ size_t offset = parentOffset | (subregionSize * i);
+ if (m_hasSuballocation & bit)
+ m_suballocations[i].dump(offset, indent + 1);
+ }
+ }
+#endif
+
+private:
+ NextLevel m_suballocations[entries];
+ // Subregions exist in one of four states:
+ // (1) empty (both bits clear)
+ // (2) fully allocated as a single allocation (m_full set)
+ // (3) partially allocated through suballocations (m_hasSuballocation set)
+ // (4) fully allocated through suballocations (both bits set)
+ BitField m_full;
+ BitField m_hasSuballocation;
+};
+
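+// Each level composes the one below it: AllocationTableLeaf<6> tracks 64 entries
+// (256KB, assuming the leaf manages 4KB pages); a 6-bit directory over that covers
+// 16MB, a further 1-bit level covers 32MB, and a further 6-bit level covers 1GB.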
+typedef AllocationTableLeaf<6> PageTables256KB;
+typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
+typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
+typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;
+
+#if CPU(ARM)
+typedef PageTables16MB FixedVMPoolPageTables;
+#elif CPU(X86_64) && !OS(LINUX)
+typedef PageTables1GB FixedVMPoolPageTables;
+#else
+typedef PageTables32MB FixedVMPoolPageTables;
#endif
- m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MMAP_FLAGS, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
+
+class FixedVMPoolAllocator
+{
+public:
+ FixedVMPoolAllocator()
+ {
+ m_base = mmap(0, FixedVMPoolPageTables::size(), INITIAL_PROTECTION_FLAGS, MMAP_FLAGS, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
if (m_base == MAP_FAILED) {
#if ENABLE(INTERPRETER)
// worrying about it's previous state, and also makes coalescing m_freeList
// simpler since we need not worry about the possibility of coalescing released
// chunks with non-released ones.
- release(m_base, m_totalHeapSize);
- m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
+ release(m_base, FixedVMPoolPageTables::size());
}
}
-
+
void* alloc(size_t size)
{
-#if ENABLE(INTERPRETER)
- if (!m_base)
- return 0;
-#else
- ASSERT(m_base);
-#endif
- void* result;
-
- // Freed allocations of the common size are not stored back into the main
- // m_freeList, but are instead stored in a separate vector. If the request
- // is for a common sized allocation, check this list.
- if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
- result = m_commonSizedAllocations.last();
- m_commonSizedAllocations.removeLast();
- } else {
- // Serach m_freeList for a suitable sized chunk to allocate memory from.
- FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
-
- // This would be bad news.
- if (!entry) {
- // Errk! Lets take a last-ditch desparation attempt at defragmentation...
- coalesceFreeSpace();
- // Did that free up a large enough chunk?
- entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
- // No?... *BOOM!*
- if (!entry)
- CRASH();
- }
- ASSERT(entry->size != m_commonSize);
-
- // Remove the entry from m_freeList. But! -
- // Each entry in the tree may represent a chain of multiple chunks of the
- // same size, and we only want to remove one on them. So, if this entry
- // does have a chain, just remove the first-but-one item from the chain.
- if (FreeListEntry* next = entry->nextEntry) {
- // We're going to leave 'entry' in the tree; remove 'next' from its chain.
- entry->nextEntry = next->nextEntry;
- next->nextEntry = 0;
- entry = next;
- } else
- m_freeList.remove(entry->size);
-
- // Whoo!, we have a result!
- ASSERT(entry->size >= size);
- result = entry->pointer;
-
- // If the allocation exactly fits the chunk we found in the,
- // m_freeList then the FreeListEntry node is no longer needed.
- if (entry->size == size)
- delete entry;
- else {
- // There is memory left over, and it is not of the common size.
- // We can reuse the existing FreeListEntry node to add this back
- // into m_freeList.
- entry->pointer = (void*)((intptr_t)entry->pointer + size);
- entry->size -= size;
- addToFreeList(entry);
- }
- }
+ ASSERT(size);
+ AllocationTableSizeClass sizeClass = classForSize(size);
+ ASSERT(sizeClass.size());
+ if (sizeClass.size() >= FixedVMPoolPageTables::size())
+ CRASH();
+
+ if (m_pages.isFull())
+ CRASH();
+ size_t offset = m_pages.allocate(sizeClass);
+ if (offset == notFound)
+ CRASH();
- // Call reuse to report to the operating system that this memory is in use.
- ASSERT(isWithinVMPool(result, size));
+ void* result = offsetToPointer(offset);
reuse(result, size);
return result;
}
void free(void* pointer, size_t size)
{
- ASSERT(m_base);
- // Call release to report to the operating system that this
- // memory is no longer in use, and need not be paged out.
- ASSERT(isWithinVMPool(pointer, size));
release(pointer, size);
- // Common-sized allocations are stored in the m_commonSizedAllocations
- // vector; all other freed chunks are added to m_freeList.
- if (size == m_commonSize)
- m_commonSizedAllocations.append(pointer);
- else
- addToFreeList(new FreeListEntry(pointer, size));
-
- // Do some housekeeping. Every time we reach a point that
- // 16MB of allocations have been freed, sweep m_freeList
- // coalescing any neighboring fragments.
- m_countFreedSinceLastCoalesce += size;
- if (m_countFreedSinceLastCoalesce >= COALESCE_LIMIT) {
- m_countFreedSinceLastCoalesce = 0;
- coalesceFreeSpace();
- }
+ ASSERT(size);
+ AllocationTableSizeClass sizeClass = classForSize(size);
+ ASSERT(sizeClass.size());
+ ASSERT(sizeClass.size() < FixedVMPoolPageTables::size());
+
+ m_pages.free(pointerToOffset(pointer), sizeClass);
}
- bool isValid() const { return !!m_base; }
+ bool isValid() const
+ {
+ return !!m_base;
+ }
private:
+ // Use madvise as appropriate to prevent freed pages from being spilled,
+ // and to attempt to ensure that used memory is reported correctly.
+#if HAVE(MADV_FREE_REUSE)
+ void release(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+ }
-#ifndef NDEBUG
- bool isWithinVMPool(void* pointer, size_t size)
+ void reuse(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+ }
+#elif HAVE(MADV_FREE)
+ void release(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_FREE) == -1 && errno == EAGAIN) { }
+ }
+
+ void reuse(void*, size_t) {}
+#elif HAVE(MADV_DONTNEED)
+ void release(void* position, size_t size)
{
- return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
+ while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
}
+
+ void reuse(void*, size_t) {}
+#else
+ void release(void*, size_t) {}
+ void reuse(void*, size_t) {}
#endif
- // Freed space from the most common sized allocations will be held in this list, ...
- const size_t m_commonSize;
- Vector<void*> m_commonSizedAllocations;
+ AllocationTableSizeClass classForSize(size_t size)
+ {
+ return FixedVMPoolPageTables::classForSize(size);
+ }
- // ... and all other freed allocations are held in m_freeList.
- SizeSortedFreeTree m_freeList;
+ void* offsetToPointer(size_t offset)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_base) + offset);
+ }
- // This is used for housekeeping, to trigger defragmentation of the freed lists.
- size_t m_countFreedSinceLastCoalesce;
+ size_t pointerToOffset(void* pointer)
+ {
+ return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_base);
+ }
void* m_base;
- size_t m_totalHeapSize;
+ FixedVMPoolPageTables m_pages;
};
void ExecutableAllocator::intializePageSize()
}
static FixedVMPoolAllocator* allocator = 0;
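+// Bytes currently allocated from the fixed VM pool; read (without locking) by
+// ExecutablePool::underMemoryPressure().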
+static size_t allocatedCount = 0;
static SpinLock spinlock = SPINLOCK_INITIALIZER;
bool ExecutableAllocator::isValid() const
{
SpinLockHolder lock_holder(&spinlock);
if (!allocator)
- allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE);
+ allocator = new FixedVMPoolAllocator();
return allocator->isValid();
}
SpinLockHolder lock_holder(&spinlock);
if (!allocator)
- allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE);
+ allocator = new FixedVMPoolAllocator();
ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
+ allocatedCount += size;
return alloc;
}
ASSERT(allocator);
allocator->free(allocation.pages, allocation.size);
+ allocatedCount -= allocation.size;
+}
+
+bool ExecutablePool::underMemoryPressure()
+{
+ // Technically we should take the spin lock here, but we don't
+ // care if we get stale data. This is only really a heuristic
+ // anyway.
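+ // Treat more than half of the fixed pool being in use as memory pressure.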
+ return allocatedCount > (FixedVMPoolPageTables::size() / 2);
}
}
ASSERT_UNUSED(result, !result);
}
+bool ExecutablePool::underMemoryPressure()
+{
+ return false;
+}
+
bool ExecutableAllocator::isValid() const
{
return true;
VirtualFree(alloc.pages, 0, MEM_RELEASE);
}
+bool ExecutablePool::underMemoryPressure()
+{
+ return false;
+}
+
bool ExecutableAllocator::isValid() const
{
return true;
repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
-JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
+JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock, void* linkerOffset)
: m_interpreter(globalData->interpreter)
, m_globalData(globalData)
, m_codeBlock(codeBlock)
, m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
, m_jumpTargetsPosition(0)
#endif
+ , m_linkerOffset(linkerOffset)
{
}
ASSERT(m_jmpTable.isEmpty());
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ RefPtr<ExecutablePool> executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size());
+ if (!executablePool)
+ return JITCode();
+ LinkBuffer patchBuffer(this, executablePool.release(), m_linkerOffset);
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
static const int patchGetByIdDefaultOffset = 256;
public:
- static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock)
+ static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, void* offsetBase = 0)
{
- return JIT(globalData, codeBlock).privateCompile();
+ return JIT(globalData, codeBlock, offsetBase).privateCompile();
}
static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
if (!globalData->canUseJIT())
return;
- JIT jit(globalData);
+ JIT jit(globalData, 0, 0);
jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
}
}
};
- JIT(JSGlobalData*, CodeBlock* = 0);
+ JIT(JSGlobalData*, CodeBlock* = 0, void* = 0);
void privateCompileMainPass();
void privateCompileLinkPass();
#endif
#endif // USE(JSVALUE32_64)
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
-#define END_UNINTERRUPTED_SEQUENCE(name) endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
+#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); endUninterruptedSequence(); } while (false)
void beginUninterruptedSequence(int, int);
void endUninterruptedSequence(int, int);
#else
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
-#define END_UNINTERRUPTED_SEQUENCE(name)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false)
#endif
void emit_op_add(Instruction*);
int m_uninterruptedConstantSequenceBegin;
#endif
#endif
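+ // Offset base handed to the LinkBuffer: the original code's start address when
+ // recompiling an existing CodeBlock, 0 otherwise.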
+ void* m_linkerOffset;
static PassRefPtr<NativeExecutable> stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
} JIT_CLASS_ALIGNMENT;
#endif
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), 0);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
CodeRef finalCode = patchBuffer.finalizeCode();
*executablePool = finalCode.m_executablePool;
- trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
#if ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
- trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
+ trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(patchBuffer.trampolineAt(nativeCallThunk)))));
#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
#else
UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
- trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
#else
UNUSED_PARAM(ctiVirtualCallLink);
#endif
#if ENABLE(JIT_OPTIMIZE_MOD)
- trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
+ trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
#endif
}
addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
+ emitLoadDouble(cond, fpRegT0);
+ addJump(branchDoubleZeroOrNaN(fpRegT0, fpRegT1), target);
} else
addSlowCase(isNotInteger);
addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
+ emitLoadDouble(cond, fpRegT0);
+ addJump(branchDoubleNonZero(fpRegT0, fpRegT1), target);
} else
addSlowCase(isNotInteger);
jit.move(Imm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(&jit, pool);
+ LinkBuffer patchBuffer(&jit, pool, 0);
return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
}
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
} else
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
jit.move(Imm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(&jit, pool);
+ LinkBuffer patchBuffer(&jit, pool, 0);
return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
}
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
move(Imm32(JSValue::Int32Tag), regT1);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
PassRefPtr<NativeExecutable> finalize()
{
- LinkBuffer patchBuffer(this, m_pool.get());
+ LinkBuffer patchBuffer(this, m_pool.get(), 0);
patchBuffer.link(m_failures, CodeLocationLabel(m_globalData->jitStubs->ctiNativeCallThunk()->generatedJITCode().addressForCall()));
return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
}
}
}
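+// While JSArray::sort converts its values to strings, the values live in a temporary
+// Vector rather than in the array's storage. Registering that vector with the heap
+// keeps the values marked if a collection happens during the toString() calls.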
+void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
+{
+ m_tempSortingVectors.append(tempVector);
+}
+
+void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
+{
+ ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
+ m_tempSortingVectors.removeLast();
+}
+
+void Heap::markTempSortVectors(MarkStack& markStack)
+{
+ typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;
+
+ VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
+ for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
+ Vector<ValueStringPair>* tempSortingVector = *it;
+
+ Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
+ for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt)
+ if (vectorIt->first)
+ markStack.append(vectorIt->first);
+ markStack.drain();
+ }
+}
+
void Heap::clearMarkBits()
{
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
// Mark explicitly registered roots.
markProtectedObjects(markStack);
+
+ // Mark temporary vectors used for Array sorting
+ markTempSortVectors(markStack);
// Mark misc. other roots.
if (m_markListSet && m_markListSet->size())
#ifndef Collector_h
#define Collector_h
+#include "JSValue.h"
#include <stddef.h>
#include <string.h>
#include <wtf/HashCountedSet.h>
void markConservatively(MarkStack&, void* start, void* end);
+ void pushTempSortVector(WTF::Vector<ValueStringPair>*);
+ void popTempSortVector(WTF::Vector<ValueStringPair>*);
+
HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = new HashSet<MarkedArgumentBuffer*>; return *m_markListSet; }
JSGlobalData* globalData() const { return m_globalData; }
void markRoots();
void markProtectedObjects(MarkStack&);
+ void markTempSortVectors(MarkStack&);
void markCurrentThreadConservatively(MarkStack&);
void markCurrentThreadConservativelyInternal(MarkStack&);
void markOtherThreadConservatively(MarkStack&, Thread*);
CollectorHeap m_heap;
ProtectCountSet m_protectedValues;
+ WTF::Vector<WTF::Vector<ValueStringPair>* > m_tempSortingVectors;
HashSet<MarkedArgumentBuffer*>* m_markListSet;
if (globalData->canUseJIT())
#endif
{
- JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
+ JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), generatedJITCode().start());
ASSERT(newJITCode.size() == generatedJITCode().size());
}
#endif
if (globalData->canUseJIT())
#endif
{
- JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
+ JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), generatedJITCode().start());
ASSERT(newJITCode.size() == generatedJITCode().size());
}
#endif
return newCodeBlock->extractExceptionInfo();
}
-void FunctionExecutable::recompile(ExecState*)
+void FunctionExecutable::recompile()
{
delete m_codeBlock;
m_codeBlock = 0;
unsigned variableCount() const { return m_numVariables; }
UString paramString() const;
- void recompile(ExecState*);
+ void recompile();
ExceptionInfo* reparseExceptionInfo(JSGlobalData*, ScopeChainNode*, CodeBlock*);
void markAggregate(MarkStack& markStack);
static PassRefPtr<FunctionExecutable> fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, int* errLine = 0, UString* errMsg = 0);
return (da > db) - (da < db);
}
-typedef std::pair<JSValue, UString> ValueStringPair;
-
static int compareByStringPairForQSort(const void* a, const void* b)
{
const ValueStringPair* va = static_cast<const ValueStringPair*>(a);
throwOutOfMemoryError(exec);
return;
}
+
+ Heap::heap(this)->pushTempSortVector(&values);
for (size_t i = 0; i < lengthNotIncludingUndefined; i++) {
JSValue value = m_storage->m_vector[i];
values[i].first = value;
}
- // FIXME: While calling these toString functions, the array could be mutated.
- // In that case, objects pointed to by values in this vector might get garbage-collected!
-
// FIXME: The following loop continues to call toString on subsequent values even after
// a toString call raises an exception.
for (size_t i = 0; i < lengthNotIncludingUndefined; i++)
values[i].second = values[i].first.toString(exec);
- if (exec->hadException())
+ if (exec->hadException()) {
+ Heap::heap(this)->popTempSortVector(&values);
return;
+ }
// FIXME: Since we sort by string value, a fast algorithm might be to use a radix sort. That would be O(N) rather
// than O(N log N).
qsort(values.begin(), values.size(), sizeof(ValueStringPair), compareByStringPairForQSort);
#endif
- // FIXME: If the toString function changed the length of the array, this might be
- // modifying the vector incorrectly.
-
+ // If the toString function changed the length of the array or its vector storage,
+ // grow them back so they can hold the original number of values.
+ if (m_vectorLength < lengthNotIncludingUndefined)
+ increaseVectorLength(lengthNotIncludingUndefined);
+ if (m_storage->m_length < lengthNotIncludingUndefined)
+ m_storage->m_length = lengthNotIncludingUndefined;
+
for (size_t i = 0; i < lengthNotIncludingUndefined; i++)
m_storage->m_vector[i] = values[i].first;
+ Heap::heap(this)->popTempSortVector(&values);
+
checkConsistency(SortConsistencyCheck);
}
#include "ArgList.h"
#include "Collector.h"
+#include "CollectorHeapIterator.h"
#include "CommonIdentifiers.h"
#include "FunctionConstructor.h"
#include "GetterSetter.h"
interpreter->dumpSampleData(exec);
}
+void JSGlobalData::recompileAllJSFunctions()
+{
+ // If JavaScript is running, it's not safe to recompile, since we'll end
+ // up throwing away code that is live on the stack.
+ ASSERT(!dynamicGlobalObject);
+
+ LiveObjectIterator it = heap.primaryHeapBegin();
+ LiveObjectIterator heapEnd = heap.primaryHeapEnd();
+ for ( ; it != heapEnd; ++it) {
+ if ((*it)->inherits(&JSFunction::info)) {
+ JSFunction* function = asFunction(*it);
+ if (!function->executable()->isHostFunction())
+ function->jsExecutable()->recompile();
+ }
+ }
+}
+
} // namespace JSC
#if ENABLE(ASSEMBLER)
ExecutableAllocator executableAllocator;
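+ // Separate executable-memory allocator for RegExp JIT code; RegexGenerator
+ // allocates from this pool rather than from executableAllocator.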
+ ExecutableAllocator regexAllocator;
#endif
#if ENABLE(JIT)
void startSampling();
void stopSampling();
void dumpSampleData(ExecState* exec);
+ void recompileAllJSFunctions();
RegExpCache* regExpCache() { return m_regExpCache; }
private:
JSGlobalData(GlobalDataType, ThreadStackType);
delete static_cast<JSGlobalObjectData*>(jsGlobalObjectData);
}
+DynamicGlobalObjectScope::DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject)
+ : m_dynamicGlobalObjectSlot(callFrame->globalData().dynamicGlobalObject)
+ , m_savedDynamicGlobalObject(m_dynamicGlobalObjectSlot)
+{
+ if (!m_dynamicGlobalObjectSlot) {
+#if ENABLE(JIT)
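+ // We are entering the VM from the outside; if executable memory is under
+ // pressure, throw away existing JIT code so that it is regenerated on demand.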
+ if (ExecutablePool::underMemoryPressure())
+ callFrame->globalData().recompileAllJSFunctions();
+#endif
+ m_dynamicGlobalObjectSlot = dynamicGlobalObject;
+
+ // Reset the date cache between JS invocations to force the VM
+ // to observe time zone changes.
+ callFrame->globalData().resetDateCache();
+ }
+}
+
} // namespace JSC
class DynamicGlobalObjectScope : public Noncopyable {
public:
- DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject)
- : m_dynamicGlobalObjectSlot(callFrame->globalData().dynamicGlobalObject)
- , m_savedDynamicGlobalObject(m_dynamicGlobalObjectSlot)
- {
- if (!m_dynamicGlobalObjectSlot) {
- m_dynamicGlobalObjectSlot = dynamicGlobalObject;
-
- // Reset the date cache between JS invocations to force the VM
- // to observe time zone changes.
- callFrame->globalData().resetDateCache();
- }
- }
+ DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject);
~DynamicGlobalObjectScope()
{
#include "StringPrototype.h"
namespace JSC {
+
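+// If building a substring would have to visit more than this many fibers, the rope
+// is flattened first; walking a long rope costs more than resolving it.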
+static const unsigned resolveRopeForSubstringCutoff = 4;
// Overview: this methods converts a JSString from holding a string in rope form
// down to a simple UString representation. It does so by building up the string
}
}
}
+
+// This function constructs a substring out of a rope, without flattening it, by reusing the existing fibers.
+// This can reduce memory usage substantially. Since traversing ropes is slow, the function reverts to
+// flattening if the rope turns out to be long.
+JSString* JSString::substringFromRope(ExecState* exec, unsigned substringStart, unsigned substringLength)
+{
+ ASSERT(isRope());
+
+ JSGlobalData* globalData = &exec->globalData();
+
+ UString substringFibers[3];
+
+ unsigned fiberCount = 0;
+ unsigned substringFiberCount = 0;
+ unsigned substringEnd = substringStart + substringLength;
+ unsigned fiberEnd = 0;
+
+ RopeIterator end;
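+ // Walk the fibers in order, tracking the character range [fiberStart, fiberEnd)
+ // each fiber covers, and keep references to the pieces that overlap the requested
+ // substring (whole fibers are reused directly, partial overlaps become substring fibers).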
+ for (RopeIterator it(m_other.m_fibers, m_fiberCount); it != end; ++it) {
+ ++fiberCount;
+ UStringImpl* fiberString = *it;
+ unsigned fiberStart = fiberEnd;
+ fiberEnd = fiberStart + fiberString->length();
+ if (fiberEnd <= substringStart)
+ continue;
+ unsigned copyStart = std::max(substringStart, fiberStart);
+ unsigned copyEnd = std::min(substringEnd, fiberEnd);
+ if (copyStart == fiberStart && copyEnd == fiberEnd)
+ substringFibers[substringFiberCount++] = UString(fiberString);
+ else
+ substringFibers[substringFiberCount++] = UString(UStringImpl::create(fiberString, copyStart - fiberStart, copyEnd - copyStart));
+ if (fiberEnd >= substringEnd)
+ break;
+ if (fiberCount > resolveRopeForSubstringCutoff || substringFiberCount >= 3) {
+ // This turned out to be a really inefficient rope. Just flatten it.
+ resolveRope(exec);
+ return jsSubstring(&exec->globalData(), m_value, substringStart, substringLength);
+ }
+ }
+ ASSERT(substringFiberCount && substringFiberCount <= 3);
+
+ if (substringLength == 1) {
+ ASSERT(substringFiberCount == 1);
+ UChar c = substringFibers[0].data()[0];
+ if (c <= 0xFF)
+ return globalData->smallStrings.singleCharacterString(globalData, c);
+ }
+ if (substringFiberCount == 1)
+ return new (globalData) JSString(globalData, substringFibers[0]);
+ if (substringFiberCount == 2)
+ return new (globalData) JSString(globalData, substringFibers[0], substringFibers[1]);
+ return new (globalData) JSString(globalData, substringFibers[0], substringFibers[1], substringFibers[2]);
+}
JSValue JSString::replaceCharacter(ExecState* exec, UChar character, const UString& replacement)
{
}
void resolveRope(ExecState*) const;
+ JSString* substringFromRope(ExecState*, unsigned offset, unsigned length);
void appendStringInConstruct(unsigned& index, const UString& string)
{
friend JSValue jsString(ExecState* exec, Register* strings, unsigned count);
friend JSValue jsString(ExecState* exec, JSValue thisValue, const ArgList& args);
friend JSString* jsStringWithFinalizer(ExecState*, const UString&, JSStringFinalizerCallback callback, void* context);
+ friend JSString* jsSubstring(ExecState* exec, JSString* s, unsigned offset, unsigned length);
};
JSString* asString(JSValue);
JSGlobalData* globalData = &exec->globalData();
return fixupVPtr(globalData, new (globalData) JSString(globalData, s, callback, context));
}
+
+ inline JSString* jsSubstring(ExecState* exec, JSString* s, unsigned offset, unsigned length)
+ {
+ ASSERT(offset <= static_cast<unsigned>(s->length()));
+ ASSERT(length <= static_cast<unsigned>(s->length()));
+ ASSERT(offset + length <= static_cast<unsigned>(s->length()));
+ JSGlobalData* globalData = &exec->globalData();
+ if (!length)
+ return globalData->smallStrings.emptyString(globalData);
+ if (s->isRope())
+ return s->substringFromRope(exec, offset, length);
+ return jsSubstring(globalData, s->m_value, offset, length);
+ }
inline JSString* jsSubstring(JSGlobalData* globalData, const UString& s, unsigned offset, unsigned length)
{
return asValue() == jsNull();
}
#endif // USE(JSVALUE32_64)
-
+
+ typedef std::pair<JSValue, UString> ValueStringPair;
} // namespace JSC
#endif // JSValue_h
PassRefPtr<RegExp> RegExpCache::lookupOrCreate(const UString& patternString, const UString& flags)
{
- if (patternString.size() < maxCacheablePatternLength) {
+ if (isCacheable(patternString)) {
pair<HashMap<RegExpKey, RefPtr<RegExp> >::iterator, bool> result = m_cacheMap.add(RegExpKey(flags, patternString), 0);
if (!result.second)
return result.first->second;
PassRefPtr<RegExp> lookupOrCreate(const UString& patternString, const UString& flags);
PassRefPtr<RegExp> create(const UString& patternString, const UString& flags);
RegExpCache(JSGlobalData* globalData);
+
+ static bool isCacheable(const UString& patternString) { return patternString.size() < maxCacheablePatternLength; }
private:
static const unsigned maxCacheablePatternLength = 256;
- static const int maxCacheableEntries = 256;
+ static const int maxCacheableEntries = 32;
typedef HashMap<RegExpKey, RefPtr<RegExp> > RegExpCacheMap;
RegExpKey patternKeyArray[maxCacheableEntries];
}
JSValue JSC_HOST_CALL stringProtoFuncSubstr(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
-{
- UString s = thisValue.toThisString(exec);
- int len = s.size();
+{
+ int len;
+ JSString* jsString = 0;
+ UString uString;
+ if (thisValue.isString()) {
+ jsString = static_cast<JSString*>(thisValue.asCell());
+ len = jsString->length();
+ } else {
+ uString = thisValue.toThisObject(exec)->toString(exec);
+ len = uString.size();
+ }
JSValue a0 = args.at(0);
JSValue a1 = args.at(1);
}
if (start + length > len)
length = len - start;
- return jsSubstring(exec, s, static_cast<unsigned>(start), static_cast<unsigned>(length));
+
+ unsigned substringStart = static_cast<unsigned>(start);
+ unsigned substringLength = static_cast<unsigned>(length);
+ if (jsString)
+ return jsSubstring(exec, jsString, substringStart, substringLength);
+ return jsSubstring(exec, uString, substringStart, substringLength);
}
JSValue JSC_HOST_CALL stringProtoFuncSubstring(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
{
- UString s = thisValue.toThisString(exec);
- int len = s.size();
+ int len;
+ JSString* jsString = 0;
+ UString uString;
+ if (thisValue.isString()) {
+ jsString = static_cast<JSString*>(thisValue.asCell());
+ len = jsString->length();
+ } else {
+ uString = thisValue.toThisObject(exec)->toString(exec);
+ len = uString.size();
+ }
JSValue a0 = args.at(0);
JSValue a1 = args.at(1);
end = start;
start = temp;
}
- return jsSubstring(exec, s, static_cast<unsigned>(start), static_cast<unsigned>(end) - static_cast<unsigned>(start));
+ unsigned substringStart = static_cast<unsigned>(start);
+ unsigned substringLength = static_cast<unsigned>(end) - substringStart;
+ if (jsString)
+ return jsSubstring(exec, jsString, substringStart, substringLength);
+ return jsSubstring(exec, uString, substringStart, substringLength);
}
JSValue JSC_HOST_CALL stringProtoFuncToLowerCase(ExecState* exec, JSObject*, JSValue thisValue, const ArgList&)
{
// Allocate a buffer big enough to hold all the characters.
const unsigned length = size();
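+ // A UTF-16 code unit expands to at most three UTF-8 bytes; give up rather than
+ // let the length * 3 buffer size overflow.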
+ if (length > numeric_limits<unsigned>::max() / 3)
+ return CString();
Vector<char, 1024> buffer(length * 3);
// Convert to runs of 8-bit characters.
extern "C" {
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
&FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
-
, 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
-
+ , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
};
}
#endif
#define ENABLE_CONTEXT_MENUS 0
+#define ENABLE_DISK_IMAGE_CACHE 1
#define ENABLE_DRAG_SUPPORT 0
#define ENABLE_FTPDIR 1
#define ENABLE_GEOLOCATION 1
#define WTF_USE_PTHREAD_GETSPECIFIC_DIRECT 1
-#define ENABLE_JIT 0
-#define ENABLE_YARR 0
-#define ENABLE_YARR_JIT 0
-#ifdef __llvm__
-#define WTF_USE_JSVALUE32_64 1
+#if defined(WTF_ARM_ARCH_VERSION) && WTF_ARM_ARCH_VERSION >= 7
+ // ARMv7: enable the JIT (and YARR JIT), using JSVALUE32_64.
+ #define WTF_USE_JSVALUE32_64 1
+ #define ENABLE_INTERPRETER 1
+ #define ENABLE_JIT 1
+ #define ENABLE_YARR 1
+ #define ENABLE_YARR_JIT 1
#else
-#define WTF_USE_JSVALUE32 1
+ // ARMv6: never use the JIT; use JSVALUE32_64 only when compiling with llvm.
+ #define ENABLE_JIT 0
+ #define ENABLE_YARR 0
+ #define ENABLE_YARR_JIT 0
+ /* FIXME: <rdar://problem/7478149> gcc-4.2 compiler bug with USE(JSVALUE32_64) and armv6 target */
+ #ifdef __llvm__
+ #define WTF_USE_JSVALUE32_64 1
+ #else
+ #define WTF_USE_JSVALUE32 1
+ #endif
#endif
#undef ENABLE_3D_CANVAS
#define ENABLE_CONTEXT_MENUS 1
#endif
+#if !defined(ENABLE_DISK_IMAGE_CACHE)
+#define ENABLE_DISK_IMAGE_CACHE 0
+#endif
+
#if !defined(ENABLE_DRAG_SUPPORT)
#define ENABLE_DRAG_SUPPORT 1
#endif
#define ENABLE_JSC_ZOMBIES 0
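+// Branch compaction: at link time, jumps are rewritten with the shortest encoding
+// that can reach their target. Currently only implemented for ARM Thumb-2.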
+#if CPU(ARM_THUMB2)
+#define ENABLE_BRANCH_COMPACTION 1
+#endif
+
#endif /* WTF_Platform_h */
/*
- * Copyright (C) 2003, 2006, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003, 2006, 2008, 2009, 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include "config.h"
#include "CString.h"
-using std::min;
+using namespace std;
namespace WTF {
{
if (!str)
return;
-
+
+ if (length >= numeric_limits<size_t>::max())
+ CRASH();
+
m_buffer = CStringBuffer::create(length + 1);
memcpy(m_buffer->mutableData(), str, length);
m_buffer->mutableData()[length] = '\0';
CString CString::newUninitialized(size_t length, char*& characterBuffer)
{
+ if (length >= numeric_limits<size_t>::max())
+ CRASH();
+
CString result;
result.m_buffer = CStringBuffer::create(length + 1);
char* bytes = result.m_buffer->mutableData();
{
if (!m_buffer || m_buffer->hasOneRef())
return;
-
- int len = m_buffer->length();
- RefPtr<CStringBuffer> m_temp = m_buffer;
- m_buffer = CStringBuffer::create(len);
- memcpy(m_buffer->mutableData(), m_temp->data(), len);
+
+ RefPtr<CStringBuffer> buffer = m_buffer.release();
+ size_t length = buffer->length();
+ m_buffer = CStringBuffer::create(length);
+ memcpy(m_buffer->mutableData(), buffer->data(), length);
}
bool operator==(const CString& a, const CString& b)
// Allocate a single buffer large enough to contain the StringImpl
// struct as well as the data which it contains. This removes one
// heap allocation from this call.
- if (length > ((std::numeric_limits<size_t>::max() - sizeof(StringImpl)) / sizeof(UChar)))
+ if (length > ((std::numeric_limits<unsigned>::max() - sizeof(StringImpl)) / sizeof(UChar)))
CRASH();
size_t size = sizeof(StringImpl) + length * sizeof(UChar);
StringImpl* string = static_cast<StringImpl*>(fastMalloc(size));
return empty();
UChar* data;
- PassRefPtr<StringImpl> string = createUninitialized(length, data);
+ RefPtr<StringImpl> string = createUninitialized(length, data);
memcpy(data, characters, length * sizeof(UChar));
- return string;
+ return string.release();
}
PassRefPtr<StringImpl> StringImpl::create(const char* characters, unsigned length)
return empty();
UChar* data;
- PassRefPtr<StringImpl> string = createUninitialized(length, data);
+ RefPtr<StringImpl> string = createUninitialized(length, data);
for (unsigned i = 0; i != length; ++i) {
unsigned char c = characters[i];
data[i] = c;
}
- return string;
+ return string.release();
}
PassRefPtr<StringImpl> StringImpl::create(const char* string)
{
if (!string)
return empty();
- return create(string, strlen(string));
+ size_t length = strlen(string);
+ if (length > numeric_limits<unsigned>::max())
+ CRASH();
+ return create(string, length);
}
PassRefPtr<StringImpl> StringImpl::create(const UChar* characters, unsigned length, PassRefPtr<SharedUChar> sharedBuffer)
if (noUpper && !(ored & ~0x7F))
return this;
+ if (m_length > static_cast<unsigned>(numeric_limits<int32_t>::max()))
+ CRASH();
int32_t length = m_length;
+
UChar* data;
RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
// but in empirical testing, few actual calls to upper() are no-ops, so
// it wouldn't be worth the extra time for pre-scanning.
UChar* data;
- PassRefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+ RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+
+ if (m_length > static_cast<unsigned>(numeric_limits<int32_t>::max()))
+ CRASH();
int32_t length = m_length;
// Do a faster loop for the case where all the characters are ASCII.
data[i] = toASCIIUpper(c);
}
if (!(ored & ~0x7F))
- return newImpl;
+ return newImpl.release();
// Do a slower implementation for cases that include non-ASCII characters.
bool error;
Unicode::toUpper(data, realLength, m_data, m_length, &error);
if (error)
return this;
- return newImpl;
+ return newImpl.release();
}
-PassRefPtr<StringImpl> StringImpl::secure(UChar aChar, bool last)
+PassRefPtr<StringImpl> StringImpl::secure(UChar character, bool hideLastCharacter)
{
- int length = m_length;
- Vector<UChar> data(length);
- if (length > 0) {
- for (int i = 0; i < length - 1; ++i)
- data[i] = aChar;
- data[length - 1] = (last ? aChar : m_data[length - 1]);
+ UChar* data;
+ RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+ if (m_length) {
+ const unsigned lastCharacterIndex = m_length - 1;
+ for (unsigned i = 0; i < lastCharacterIndex; ++i)
+ data[i] = character;
+ data[lastCharacterIndex] = hideLastCharacter ? character : m_data[lastCharacterIndex];
}
- return adopt(data);
+ return newImpl.release();
}
PassRefPtr<StringImpl> StringImpl::foldCase()
{
UChar* data;
- PassRefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+ RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+
+ if (m_length > static_cast<unsigned>(numeric_limits<int32_t>::max()))
+ CRASH();
int32_t length = m_length;
// Do a faster loop for the case where all the characters are ASCII.
UChar ored = 0;
- for (int i = 0; i < length; i++) {
+ for (int32_t i = 0; i < length; i++) {
UChar c = m_data[i];
ored |= c;
data[i] = toASCIILower(c);
}
if (!(ored & ~0x7F))
- return newImpl;
+ return newImpl.release();
// Do a slower implementation for cases that include non-ASCII characters.
bool error;
int32_t realLength = Unicode::foldCase(data, length, m_data, m_length, &error);
if (!error && realLength == length)
- return newImpl;
+ return newImpl.release();
newImpl = createUninitialized(realLength, data);
Unicode::foldCase(data, realLength, m_data, m_length, &error);
if (error)
return this;
- return newImpl;
+ return newImpl.release();
}
PassRefPtr<StringImpl> StringImpl::stripWhiteSpace()
if (!chs || index < 0)
return -1;
- int chsLength = strlen(chs);
+ size_t matchStringLength = strlen(chs);
+ if (matchStringLength > static_cast<unsigned>(numeric_limits<int>::max()))
+ CRASH();
+ int chsLength = matchStringLength;
int n = m_length - index;
if (n < 0)
return -1;
return this;
UChar* data;
- PassRefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
+ RefPtr<StringImpl> newImpl = createUninitialized(m_length, data);
for (i = 0; i != m_length; ++i) {
UChar ch = m_data[i];
ch = newC;
data[i] = ch;
}
- return newImpl;
+ return newImpl.release();
}
PassRefPtr<StringImpl> StringImpl::replace(unsigned position, unsigned lengthToReplace, StringImpl* str)
if ((length() - lengthToReplace) >= (numeric_limits<unsigned>::max() - lengthToInsert))
CRASH();
- PassRefPtr<StringImpl> newImpl =
+ RefPtr<StringImpl> newImpl =
createUninitialized(length() - lengthToReplace + lengthToInsert, data);
memcpy(data, characters(), position * sizeof(UChar));
if (str)
memcpy(data + position, str->characters(), lengthToInsert * sizeof(UChar));
memcpy(data + position + lengthToInsert, characters() + position + lengthToReplace,
(length() - position - lengthToReplace) * sizeof(UChar));
- return newImpl;
+ return newImpl.release();
}
PassRefPtr<StringImpl> StringImpl::replace(UChar pattern, StringImpl* replacement)
newSize += replaceSize;
UChar* data;
- PassRefPtr<StringImpl> newImpl = createUninitialized(newSize, data);
+ RefPtr<StringImpl> newImpl = createUninitialized(newSize, data);
// Construct the new data
int srcSegmentEnd;
ASSERT(dstOffset + srcSegmentLength == static_cast<int>(newImpl->length()));
- return newImpl;
+ return newImpl.release();
}
PassRefPtr<StringImpl> StringImpl::replace(StringImpl* pattern, StringImpl* replacement)
newSize += matchCount * repStrLength;
UChar* data;
- PassRefPtr<StringImpl> newImpl = createUninitialized(newSize, data);
+ RefPtr<StringImpl> newImpl = createUninitialized(newSize, data);
// Construct the new data
int srcSegmentEnd;
ASSERT(dstOffset + srcSegmentLength == static_cast<int>(newImpl->length()));
- return newImpl;
+ return newImpl.release();
}
bool equal(const StringImpl* a, const StringImpl* b)
PassRefPtr<StringImpl> StringImpl::createWithTerminatingNullCharacter(const StringImpl& string)
{
// Use createUninitialized instead of 'new StringImpl' so that the string and its buffer
- // get allocated in a single malloc block.
+ // get allocated in a single memory block.
UChar* data;
- int length = string.m_length;
+ unsigned length = string.m_length;
+ if (length >= numeric_limits<unsigned>::max())
+ CRASH();
RefPtr<StringImpl> terminatedString = createUninitialized(length + 1, data);
memcpy(data, string.m_data, length * sizeof(UChar));
data[length] = 0;
return empty();
}
- if (length > ((std::numeric_limits<size_t>::max() - sizeof(StringImpl)) / sizeof(UChar)))
+ if (length > ((std::numeric_limits<unsigned>::max() - sizeof(StringImpl)) / sizeof(UChar))) {
+ output = 0;
return 0;
+ }
StringImpl* resultImpl;
- if (!tryFastMalloc(sizeof(UChar) * length + sizeof(StringImpl)).getValue(resultImpl))
+ if (!tryFastMalloc(sizeof(UChar) * length + sizeof(StringImpl)).getValue(resultImpl)) {
+ output = 0;
return 0;
+ }
output = reinterpret_cast<UChar*>(resultImpl + 1);
return adoptRef(new(resultImpl) StringImpl(length));
}
{
if (size_t size = vector.size()) {
ASSERT(vector.data());
+ if (size > std::numeric_limits<unsigned>::max())
+ CRASH();
return adoptRef(new StringImpl(vector.releaseBuffer(), size));
}
return empty();
PassRefPtr<StringImpl> lower();
PassRefPtr<StringImpl> upper();
- PassRefPtr<StringImpl> secure(UChar aChar, bool last = true);
+ PassRefPtr<StringImpl> secure(UChar, bool hideLastCharacter = true);
PassRefPtr<StringImpl> foldCase();
PassRefPtr<StringImpl> stripWhiteSpace();
/*
* (C) 1999 Lars Knoll (knoll@kde.org)
- * Copyright (C) 2004, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010 Apple Inc. All rights reserved.
* Copyright (C) 2007-2009 Torch Mobile, Inc.
*
* This library is free software; you can redistribute it and/or
#include "config.h"
#include "WTFString.h"
-#include <limits>
#include <stdarg.h>
#include <wtf/ASCIICType.h>
#include <wtf/text/CString.h>
using namespace WTF;
using namespace WTF::Unicode;
+using namespace std;
namespace WebCore {
if (!str)
return;
- int len = 0;
+ size_t len = 0;
while (str[len] != UChar(0))
len++;
+
+ if (len > numeric_limits<unsigned>::max())
+ CRASH();
m_impl = StringImpl::create(str, len);
}
if (str.m_impl) {
if (m_impl) {
UChar* data;
- RefPtr<StringImpl> newImpl =
- StringImpl::createUninitialized(m_impl->length() + str.length(), data);
+ if (str.length() > numeric_limits<unsigned>::max() - m_impl->length())
+ CRASH();
+ RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + str.length(), data);
memcpy(data, m_impl->characters(), m_impl->length() * sizeof(UChar));
memcpy(data + m_impl->length(), str.characters(), str.length() * sizeof(UChar));
m_impl = newImpl.release();
// call to fastMalloc every single time.
if (m_impl) {
UChar* data;
- RefPtr<StringImpl> newImpl =
- StringImpl::createUninitialized(m_impl->length() + 1, data);
+ if (m_impl->length() >= numeric_limits<unsigned>::max())
+ CRASH();
+ RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + 1, data);
memcpy(data, m_impl->characters(), m_impl->length() * sizeof(UChar));
data[m_impl->length()] = c;
m_impl = newImpl.release();
// call to fastMalloc every single time.
if (m_impl) {
UChar* data;
- RefPtr<StringImpl> newImpl =
- StringImpl::createUninitialized(m_impl->length() + 1, data);
+ if (m_impl->length() >= numeric_limits<unsigned>::max())
+ CRASH();
+ RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(m_impl->length() + 1, data);
memcpy(data, m_impl->characters(), m_impl->length() * sizeof(UChar));
data[m_impl->length()] = c;
m_impl = newImpl.release();
ASSERT(charactersToAppend);
UChar* data;
- RefPtr<StringImpl> newImpl =
- StringImpl::createUninitialized(length() + lengthToAppend, data);
+ if (lengthToAppend > numeric_limits<unsigned>::max() - length())
+ CRASH();
+ RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() + lengthToAppend, data);
memcpy(data, characters(), length() * sizeof(UChar));
memcpy(data + length(), charactersToAppend, lengthToAppend * sizeof(UChar));
m_impl = newImpl.release();
ASSERT(charactersToInsert);
UChar* data;
- RefPtr<StringImpl> newImpl =
- StringImpl::createUninitialized(length() + lengthToInsert, data);
+ if (lengthToInsert > numeric_limits<unsigned>::max() - length())
+ CRASH();
+ RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() + lengthToInsert, data);
memcpy(data, characters(), position * sizeof(UChar));
memcpy(data + position, charactersToInsert, lengthToInsert * sizeof(UChar));
memcpy(data + position + lengthToInsert, characters() + position, (length() - position) * sizeof(UChar));
if (static_cast<unsigned>(lengthToRemove) > length() - position)
lengthToRemove = length() - position;
UChar* data;
- RefPtr<StringImpl> newImpl =
- StringImpl::createUninitialized(length() - lengthToRemove, data);
+ RefPtr<StringImpl> newImpl = StringImpl::createUninitialized(length() - lengthToRemove, data);
memcpy(data, characters(), position * sizeof(UChar));
memcpy(data + position, characters() + position + lengthToRemove,
(length() - lengthToRemove - position) * sizeof(UChar));
// * We could allocate a CStringBuffer with an appropriate size to
// have a good chance of being able to write the string into the
// buffer without reallocing (say, 1.5 x length).
+ if (length > numeric_limits<unsigned>::max() / 3)
+ return CString();
Vector<char, 1024> bufferVector(length * 3);
char* buffer = bufferVector.data();
String String::fromUTF8(const char* stringStart, size_t length)
{
+ if (length > numeric_limits<unsigned>::max())
+ CRASH();
+
if (!stringStart)
return String();
template <typename IntegralType>
static inline IntegralType toIntegralType(const UChar* data, size_t length, bool* ok, int base)
{
- static const IntegralType integralMax = std::numeric_limits<IntegralType>::max();
- static const bool isSigned = std::numeric_limits<IntegralType>::is_signed;
+ static const IntegralType integralMax = numeric_limits<IntegralType>::max();
+ static const bool isSigned = numeric_limits<IntegralType>::is_signed;
const IntegralType maxMultiplier = integralMax / base;
IntegralType value = 0;
#include "JSGlobalData.h"
#include "LinkBuffer.h"
#include "MacroAssembler.h"
+#include "RegExpCache.h"
#include "RegexCompiler.h"
#include "pcre.h" // temporary, remove when fallback is removed.
{
generate();
- LinkBuffer patchBuffer(this, globalData->executableAllocator.poolForSize(size()));
+ LinkBuffer patchBuffer(this, globalData->regexAllocator.poolForSize(size()), 0);
for (unsigned i = 0; i < m_backtrackRecords.size(); ++i)
patchBuffer.patch(m_backtrackRecords[i].dataLabel, patchBuffer.locationOf(m_backtrackRecords[i].backtrackLocation));
return;
numSubpatterns = pattern.m_numSubpatterns;
- if (!pattern.m_shouldFallBack && globalData->canUseJIT()) {
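+ // Only JIT-compile patterns that are small enough to be cacheable; larger
+ // patterns take the existing fallback path.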
+ if (!pattern.m_shouldFallBack && globalData->canUseJIT() && RegExpCache::isCacheable(patternString)) {
RegexGenerator generator(pattern);
generator.compile(globalData, jitObject);
return;