X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/6fe7ccc865dc7d7541b93c5bcaf6368d2c98a174..HEAD:/runtime/SymbolTable.h

diff --git a/runtime/SymbolTable.h b/runtime/SymbolTable.h
index f540a12..9f0bc4e 100644
--- a/runtime/SymbolTable.h
+++ b/runtime/SymbolTable.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
  *    its contributors may be used to endorse or promote products derived
  *    from this software without specific prior written permission.
  *
@@ -29,105 +29,663 @@
 #ifndef SymbolTable_h
 #define SymbolTable_h
 
+#include "ConcurrentJITLock.h"
+#include "ConstantMode.h"
+#include "InferredValue.h"
 #include "JSObject.h"
-#include "UString.h"
-#include <wtf/AlwaysInline.h>
+#include "ScopedArgumentsTable.h"
+#include "TypeLocation.h"
+#include "VarOffset.h"
+#include "Watchpoint.h"
+#include <memory>
 #include <wtf/HashTraits.h>
+#include <wtf/text/UniquedStringImpl.h>
 
 namespace JSC {
 
-    static ALWAYS_INLINE int missingSymbolMarker() { return std::numeric_limits<int>::max(); }
+class SymbolTable;
 
-    // The bit twiddling in this class assumes that every register index is a
-    // reasonably small positive or negative number, and therefore has its high
-    // four bits all set or all unset.
+static ALWAYS_INLINE int missingSymbolMarker() { return std::numeric_limits<int>::max(); }
 
-    struct SymbolTableEntry {
-        SymbolTableEntry()
-            : m_bits(0)
+// The bit twiddling in this class assumes that every register index is a
+// reasonably small positive or negative number, and therefore has its high
+// four bits all set or all unset.
+
+// In addition to implementing semantics-mandated variable attributes and
+// implementation-mandated variable indexing, this class also implements
+// watchpoints to be used for JIT optimizations. Because watchpoints are
+// meant to be relatively rare, this class optimizes heavily for the case
+// that they are not being used. To that end, this class uses the thin-fat
+// idiom: either it is thin, in which case it contains an in-place encoded
+// word that consists of attributes, the index, and a bit saying that it is
+// thin; or it is fat, in which case it contains a pointer to a malloc'd
+// data structure and a bit saying that it is fat. The malloc'd data
+// structure will be malloced a second time upon copy, to preserve the
+// property that in-place edits to SymbolTableEntry do not manifest in any
+// copies. However, the malloc'd FatEntry data structure contains a ref-
+// counted pointer to a shared WatchpointSet. Thus, in-place edits of the
+// WatchpointSet will manifest in all copies. Here's a picture:
+//
+// SymbolTableEntry --> FatEntry --> WatchpointSet
+//
+// If you make a copy of a SymbolTableEntry, you will have:
+//
+// original: SymbolTableEntry --> FatEntry --> WatchpointSet
+// copy:     SymbolTableEntry --> FatEntry -----^
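The thin-fat trick described in this comment is generic enough to show in isolation. The following standalone sketch is illustrative only and not part of this patch (all names are invented): a low tag bit says whether the word holds inline-encoded payload bits or a pointer to a heap-allocated record.

    #include <cassert>
    #include <cstdint>

    // Toy thin-fat word: low bit set => "thin" (payload encoded in place),
    // low bit clear => "fat" (the word is really a pointer to a heap record).
    struct ToyFatRecord { intptr_t bits; };

    class ToyThinFatWord {
    public:
        explicit ToyThinFatWord(intptr_t payload)
            : m_bits((payload << 1) | ThinFlag) { }

        bool isFat() const { return !(m_bits & ThinFlag); }

        intptr_t payload() const
        {
            // Branch on isFat() once, the same way SymbolTableEntry::bits() does.
            return (isFat() ? reinterpret_cast<ToyFatRecord*>(m_bits)->bits : m_bits) >> 1;
        }

        void inflate()
        {
            if (isFat())
                return;
            // Move the inline bits out to a heap record; malloc'd memory is aligned,
            // so the low bit of the pointer is naturally clear.
            m_bits = reinterpret_cast<intptr_t>(new ToyFatRecord{m_bits & ~ThinFlag});
            // (A real implementation also frees the record on destruction and copies
            // it on assignment, as freeFatEntrySlow()/copySlow() do in this patch.)
        }

    private:
        static constexpr intptr_t ThinFlag = 0x1;
        intptr_t m_bits;
    };

    int main()
    {
        ToyThinFatWord word(42);
        assert(!word.isFat() && word.payload() == 42);
        word.inflate();
        assert(word.isFat() && word.payload() == 42);
        return 0;
    }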
+
+struct SymbolTableEntry {
+private:
+    static VarOffset varOffsetFromBits(intptr_t bits)
+    {
+        VarKind kind;
+        intptr_t kindBits = bits & KindBitsMask;
+        if (kindBits <= UnwatchableScopeKindBits)
+            kind = VarKind::Scope;
+        else if (kindBits == StackKindBits)
+            kind = VarKind::Stack;
+        else
+            kind = VarKind::DirectArgument;
+        return VarOffset::assemble(kind, static_cast<unsigned>(bits >> FlagBits));
+    }
+
+    static ScopeOffset scopeOffsetFromBits(intptr_t bits)
+    {
+        ASSERT((bits & KindBitsMask) <= UnwatchableScopeKindBits);
+        return ScopeOffset(static_cast<unsigned>(bits >> FlagBits));
+    }
+
+public:
+
+    // Use the SymbolTableEntry::Fast class, either via implicit cast or by calling
+    // getFast(), when you (1) only care about isNull(), getIndex(), and isReadOnly(),
+    // and (2) you are in a hot path where you need to minimize the number of times
+    // that you branch on isFat() when getting the bits().
+    class Fast {
+    public:
+        Fast()
+            : m_bits(SlimFlag)
         {
         }
-
-        SymbolTableEntry(int index)
+
+        ALWAYS_INLINE Fast(const SymbolTableEntry& entry)
+            : m_bits(entry.bits())
         {
-            ASSERT(isValidIndex(index));
-            pack(index, false, false);
+        }
+
+        bool isNull() const
+        {
+            return !(m_bits & ~SlimFlag);
         }
 
-        SymbolTableEntry(int index, unsigned attributes)
+        VarOffset varOffset() const
         {
-            ASSERT(isValidIndex(index));
-            pack(index, attributes & ReadOnly, attributes & DontEnum);
+            return varOffsetFromBits(m_bits);
         }
 
-        bool isNull() const
+        // Asserts if the offset is anything but a scope offset. This structures the assertions
+        // in a way that may result in better code, even in release, than doing
+        // varOffset().scopeOffset().
+        ScopeOffset scopeOffset() const
         {
-            return !m_bits;
+            return scopeOffsetFromBits(m_bits);
         }
-
-        int getIndex() const
+
+        bool isReadOnly() const
         {
-            return m_bits >> FlagBits;
+            return m_bits & ReadOnlyFlag;
         }
-
+
+        bool isDontEnum() const
+        {
+            return m_bits & DontEnumFlag;
+        }
+
         unsigned getAttributes() const
         {
             unsigned attributes = 0;
-            if (m_bits & ReadOnlyFlag)
+            if (isReadOnly())
                 attributes |= ReadOnly;
-            if (m_bits & DontEnumFlag)
+            if (isDontEnum())
                 attributes |= DontEnum;
             return attributes;
         }
 
-        void setAttributes(unsigned attributes)
+        bool isFat() const
         {
-            pack(getIndex(), attributes & ReadOnly, attributes & DontEnum);
+            return !(m_bits & SlimFlag);
         }
+
+    private:
+        friend struct SymbolTableEntry;
+        intptr_t m_bits;
+    };
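As a usage note: the point of Fast is to pay for the isFat() branch once and then query the cached bits repeatedly. A rough sketch of a hot-path caller follows (illustrative only, not part of this patch; `symbolTable`, `ident`, `isWrite`, and the emit* helpers are invented, and the surrounding JSC types are assumed).

    // Hypothetical hot-path lookup inside some JSC function.
    SymbolTableEntry::Fast entry = symbolTable->get(ident.impl()); // implicit cast to Fast
    if (entry.isNull())
        return emitVarNotFound();         // invented helper
    if (isWrite && entry.isReadOnly())
        return emitReadOnlyError();       // invented helper
    VarOffset offset = entry.varOffset(); // no further isFat() branches on this path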
 
-        bool isReadOnly() const
-        {
-            return m_bits & ReadOnlyFlag;
-        }
+    SymbolTableEntry()
+        : m_bits(SlimFlag)
+    {
+    }
 
-    private:
-        static const unsigned ReadOnlyFlag = 0x1;
-        static const unsigned DontEnumFlag = 0x2;
-        static const unsigned NotNullFlag = 0x4;
-        static const unsigned FlagBits = 3;
+    SymbolTableEntry(VarOffset offset)
+        : m_bits(SlimFlag)
+    {
+        ASSERT(isValidVarOffset(offset));
+        pack(offset, true, false, false);
+    }
 
-        void pack(int index, bool readOnly, bool dontEnum)
+    SymbolTableEntry(VarOffset offset, unsigned attributes)
+        : m_bits(SlimFlag)
+    {
+        ASSERT(isValidVarOffset(offset));
+        pack(offset, true, attributes & ReadOnly, attributes & DontEnum);
+    }
+
+    ~SymbolTableEntry()
+    {
+        freeFatEntry();
+    }
+
+    SymbolTableEntry(const SymbolTableEntry& other)
+        : m_bits(SlimFlag)
+    {
+        *this = other;
+    }
+
+    SymbolTableEntry& operator=(const SymbolTableEntry& other)
+    {
+        if (UNLIKELY(other.isFat()))
+            return copySlow(other);
+        freeFatEntry();
+        m_bits = other.m_bits;
+        return *this;
+    }
+
+    bool isNull() const
+    {
+        return !(bits() & ~SlimFlag);
+    }
+
+    VarOffset varOffset() const
+    {
+        return varOffsetFromBits(bits());
+    }
+
+    bool isWatchable() const
+    {
+        return (m_bits & KindBitsMask) == ScopeKindBits;
+    }
+
+    // Asserts if the offset is anything but a scope offset. This structures the assertions
+    // in a way that may result in better code, even in release, than doing
+    // varOffset().scopeOffset().
+    ScopeOffset scopeOffset() const
+    {
+        return scopeOffsetFromBits(bits());
+    }
+
+    ALWAYS_INLINE Fast getFast() const
+    {
+        return Fast(*this);
+    }
+
+    ALWAYS_INLINE Fast getFast(bool& wasFat) const
+    {
+        Fast result;
+        wasFat = isFat();
+        if (wasFat)
+            result.m_bits = fatEntry()->m_bits | SlimFlag;
+        else
+            result.m_bits = m_bits;
+        return result;
+    }
+
+    unsigned getAttributes() const
+    {
+        return getFast().getAttributes();
+    }
+
+    void setAttributes(unsigned attributes)
+    {
+        pack(varOffset(), isWatchable(), attributes & ReadOnly, attributes & DontEnum);
+    }
+
+    bool isReadOnly() const
+    {
+        return bits() & ReadOnlyFlag;
+    }
+
+    ConstantMode constantMode() const
+    {
+        return modeForIsConstant(isReadOnly());
+    }
+
+    bool isDontEnum() const
+    {
+        return bits() & DontEnumFlag;
+    }
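For illustration only (not part of this patch; assumes the surrounding JSC types and the ReadOnly/DontEnum property-attribute flags used above), this is how the two value constructors and the attribute accessors fit together:

    // Inside some JSC function: a read-only, non-enumerable scope variable at scope offset 0.
    SymbolTableEntry constantEntry(VarOffset(ScopeOffset(0)), ReadOnly | DontEnum);
    ASSERT(constantEntry.isReadOnly());
    ASSERT(constantEntry.isDontEnum());
    ASSERT(constantEntry.varOffset().kind() == VarKind::Scope);

    // A plain entry carries no attributes.
    SymbolTableEntry plainEntry(VarOffset(ScopeOffset(1)));
    ASSERT(!plainEntry.getAttributes());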
+
+    void disableWatching()
+    {
+        if (WatchpointSet* set = watchpointSet())
+            set->invalidate("Disabling watching in symbol table");
+        if (varOffset().isScope())
+            pack(varOffset(), false, isReadOnly(), isDontEnum());
+    }
+
+    void prepareToWatch();
+
+    void addWatchpoint(Watchpoint*);
+
+    // This watchpoint set is initialized clear, and goes through the following state transitions:
+    //
+    // First write to this var, in any scope that has this symbol table: Clear->IsWatched.
+    //
+    // Second write to this var, in any scope that has this symbol table: IsWatched->IsInvalidated.
+    //
+    // We ensure that we touch the set (i.e. trigger its state transition) after we do the write. This
+    // means that if you're in the compiler thread, and you:
+    //
+    // 1) Observe that the set IsWatched and commit to adding your watchpoint.
+    // 2) Load a value from any scope that has this watchpoint set.
+    //
+    // Then you can be sure that that value is either going to be the correct value for that var forever,
+    // or the watchpoint set will invalidate and you'll get fired.
+    //
+    // It's possible to write a program that first creates multiple scopes with the same var, and then
+    // initializes that var in just one of them. This means that a compilation could constant-fold to one
+    // of the scopes that still has an undefined value for this variable. That's fine, because at that
+    // point any write to any of the instances of that variable would fire the watchpoint.
+    WatchpointSet* watchpointSet()
+    {
+        if (!isFat())
+            return 0;
+        return fatEntry()->m_watchpoints.get();
+    }
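To make that contract concrete, here is a rough sketch of the compiler-thread side (illustrative only, not part of this patch; `loadFromScope()` and `codeBlock->addConstantWatchpoint()` are invented stand-ins, and the surrounding JSC types are assumed):

    // Hypothetical compiler-thread code that follows the ordering rules above.
    SymbolTableEntry entry = symbolTable->get(ident.impl());
    WatchpointSet* set = entry.watchpointSet();
    if (set && set->state() == IsWatched) {
        // 1) Commit to adding a watchpoint while the set is still IsWatched.
        // 2) Only then load the value: either it stays correct for this var forever,
        //    or the set becomes invalidated and the watchpoint fires, discarding the code.
        JSValue constant = loadFromScope(scope, entry.scopeOffset()); // invented helper; scope var assumed
        codeBlock->addConstantWatchpoint(set, constant);              // invented helper
    }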
+
+private:
+    static const intptr_t SlimFlag = 0x1;
+    static const intptr_t ReadOnlyFlag = 0x2;
+    static const intptr_t DontEnumFlag = 0x4;
+    static const intptr_t NotNullFlag = 0x8;
+    static const intptr_t KindBitsMask = 0x30;
+    static const intptr_t ScopeKindBits = 0x00;
+    static const intptr_t UnwatchableScopeKindBits = 0x10;
+    static const intptr_t StackKindBits = 0x20;
+    static const intptr_t DirectArgumentKindBits = 0x30;
+    static const intptr_t FlagBits = 6;
+
+    class FatEntry {
+        WTF_MAKE_FAST_ALLOCATED;
+    public:
+        FatEntry(intptr_t bits)
+            : m_bits(bits & ~SlimFlag)
         {
-            m_bits = (index << FlagBits) | NotNullFlag;
-            if (readOnly)
-                m_bits |= ReadOnlyFlag;
-            if (dontEnum)
-                m_bits |= DontEnumFlag;
         }
 
-        bool isValidIndex(int index)
-        {
-            return ((index << FlagBits) >> FlagBits) == index;
+        intptr_t m_bits; // always has FatFlag set and exactly matches what the bits would have been if this wasn't fat.
+
+        RefPtr<WatchpointSet> m_watchpoints;
+    };
+
+    SymbolTableEntry& copySlow(const SymbolTableEntry&);
+    JS_EXPORT_PRIVATE void notifyWriteSlow(VM&, JSValue, const FireDetail&);
+
+    bool isFat() const
+    {
+        return !(m_bits & SlimFlag);
+    }
+
+    const FatEntry* fatEntry() const
+    {
+        ASSERT(isFat());
+        return bitwise_cast<const FatEntry*>(m_bits);
+    }
+
+    FatEntry* fatEntry()
+    {
+        ASSERT(isFat());
+        return bitwise_cast<FatEntry*>(m_bits);
+    }
+
+    FatEntry* inflate()
+    {
+        if (LIKELY(isFat()))
+            return fatEntry();
+        return inflateSlow();
+    }
+
+    FatEntry* inflateSlow();
+
+    ALWAYS_INLINE intptr_t bits() const
+    {
+        if (isFat())
+            return fatEntry()->m_bits;
+        return m_bits;
+    }
+
+    ALWAYS_INLINE intptr_t& bits()
+    {
+        if (isFat())
+            return fatEntry()->m_bits;
+        return m_bits;
+    }
+
+    void freeFatEntry()
+    {
+        if (LIKELY(!isFat()))
+            return;
+        freeFatEntrySlow();
+    }
+
+    JS_EXPORT_PRIVATE void freeFatEntrySlow();
+
+    void pack(VarOffset offset, bool isWatchable, bool readOnly, bool dontEnum)
+    {
+        ASSERT(!isFat());
+        intptr_t& bitsRef = bits();
+        bitsRef =
+            (static_cast<intptr_t>(offset.rawOffset()) << FlagBits) | NotNullFlag | SlimFlag;
+        if (readOnly)
+            bitsRef |= ReadOnlyFlag;
+        if (dontEnum)
+            bitsRef |= DontEnumFlag;
+        switch (offset.kind()) {
+        case VarKind::Scope:
+            if (isWatchable)
+                bitsRef |= ScopeKindBits;
+            else
+                bitsRef |= UnwatchableScopeKindBits;
+            break;
+        case VarKind::Stack:
+            bitsRef |= StackKindBits;
+            break;
+        case VarKind::DirectArgument:
+            bitsRef |= DirectArgumentKindBits;
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
         }
+    }
+
+    static bool isValidVarOffset(VarOffset offset)
+    {
+        return ((static_cast<intptr_t>(offset.rawOffset()) << FlagBits) >> FlagBits) == static_cast<intptr_t>(offset.rawOffset());
+    }
 
-        int m_bits;
-    };
+    intptr_t m_bits;
+};
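Since the encoding above is plain integer arithmetic, it can be checked in isolation. A standalone sketch follows (the constant values are copied from this patch; everything else is invented for illustration):

    #include <cassert>
    #include <cstdint>

    // Flag and kind constants as defined in SymbolTableEntry above.
    static const intptr_t SlimFlag = 0x1, ReadOnlyFlag = 0x2, DontEnumFlag = 0x4;
    static const intptr_t NotNullFlag = 0x8, KindBitsMask = 0x30, StackKindBits = 0x20;
    static const intptr_t FlagBits = 6;

    int main()
    {
        // Encode a read-only stack variable at raw offset 7, the way pack() does.
        intptr_t bits = (static_cast<intptr_t>(7) << FlagBits) | NotNullFlag | SlimFlag;
        bits |= ReadOnlyFlag | StackKindBits;

        // Decode it again, the way varOffsetFromBits() and the flag accessors do.
        assert((bits & KindBitsMask) == StackKindBits); // VarKind::Stack
        assert((bits >> FlagBits) == 7);                // raw offset
        assert(bits & ReadOnlyFlag);
        assert(!(bits & DontEnumFlag));
        return 0;
    }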
 
-    struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> {
-        static const bool emptyValueIsZero = true;
-        static const bool needsDestruction = false;
-    };
+struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> {
+    static const bool needsDestruction = true;
+};
 
-    typedef HashMap<RefPtr<StringImpl>, SymbolTableEntry, IdentifierRepHash, HashTraits<RefPtr<StringImpl> >, SymbolTableIndexHashTraits> SymbolTable;
+class SymbolTable final : public JSCell {
+public:
+    typedef JSCell Base;
+    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;
 
-    class SharedSymbolTable : public SymbolTable, public RefCounted<SharedSymbolTable> {
-        WTF_MAKE_FAST_ALLOCATED;
-    public:
-        static PassRefPtr<SharedSymbolTable> create() { return adoptRef(new SharedSymbolTable); }
-    private:
-        SharedSymbolTable() { turnOffVerifier(); }
+    typedef HashMap<RefPtr<UniquedStringImpl>, SymbolTableEntry, IdentifierRepHash, HashTraits<RefPtr<UniquedStringImpl>>, SymbolTableIndexHashTraits> Map;
+    typedef HashMap<RefPtr<UniquedStringImpl>, GlobalVariableID, IdentifierRepHash> UniqueIDMap;
+    typedef HashMap<RefPtr<UniquedStringImpl>, RefPtr<TypeSet>, IdentifierRepHash> UniqueTypeSetMap;
+    typedef HashMap<VarOffset, RefPtr<UniquedStringImpl>> OffsetToVariableMap;
+    typedef Vector<SymbolTableEntry*> LocalToEntryVec;
+
+    static SymbolTable* create(VM& vm)
+    {
+        SymbolTable* symbolTable = new (NotNull, allocateCell<SymbolTable>(vm.heap)) SymbolTable(vm);
+        symbolTable->finishCreation(vm);
+        return symbolTable;
+    }
+
+    static SymbolTable* createNameScopeTable(VM& vm, const Identifier& ident, unsigned attributes)
+    {
+        SymbolTable* result = create(vm);
+        result->add(ident.impl(), SymbolTableEntry(VarOffset(ScopeOffset(0)), attributes));
+        return result;
+    }
+
+    static const bool needsDestruction = true;
+    static void destroy(JSCell*);
+
+    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+    {
+        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
+    }
+
+    // You must hold the lock until after you're done with the iterator.
+    Map::iterator find(const ConcurrentJITLocker&, UniquedStringImpl* key)
+    {
+        return m_map.find(key);
+    }
+
+    Map::iterator find(const GCSafeConcurrentJITLocker&, UniquedStringImpl* key)
+    {
+        return m_map.find(key);
+    }
+
+    SymbolTableEntry get(const ConcurrentJITLocker&, UniquedStringImpl* key)
+    {
+        return m_map.get(key);
+    }
+
+    SymbolTableEntry get(UniquedStringImpl* key)
+    {
+        ConcurrentJITLocker locker(m_lock);
+        return get(locker, key);
+    }
+
+    SymbolTableEntry inlineGet(const ConcurrentJITLocker&, UniquedStringImpl* key)
+    {
+        return m_map.inlineGet(key);
+    }
+
+    SymbolTableEntry inlineGet(UniquedStringImpl* key)
+    {
+        ConcurrentJITLocker locker(m_lock);
+        return inlineGet(locker, key);
+    }
+
+    Map::iterator begin(const ConcurrentJITLocker&)
+    {
+        return m_map.begin();
+    }
+
+    Map::iterator end(const ConcurrentJITLocker&)
+    {
+        return m_map.end();
+    }
+
+    Map::iterator end(const GCSafeConcurrentJITLocker&)
+    {
+        return m_map.end();
+    }
+
+    size_t size(const ConcurrentJITLocker&) const
+    {
+        return m_map.size();
+    }
+
+    size_t size() const
+    {
+        ConcurrentJITLocker locker(m_lock);
+        return size(locker);
+    }
+
+    ScopeOffset maxScopeOffset() const
+    {
+        return m_maxScopeOffset;
+    }
+
+    void didUseScopeOffset(ScopeOffset offset)
+    {
+        if (!m_maxScopeOffset || m_maxScopeOffset < offset)
+            m_maxScopeOffset = offset;
+    }
+
+    void didUseVarOffset(VarOffset offset)
+    {
+        if (offset.isScope())
+            didUseScopeOffset(offset.scopeOffset());
+    }
+
+    unsigned scopeSize() const
+    {
+        ScopeOffset maxScopeOffset = this->maxScopeOffset();
+
+        // Do some calculation that relies on invalid scope offset plus one being zero.
+        unsigned fastResult = maxScopeOffset.offsetUnchecked() + 1;
+
+        // Assert that this works.
+        ASSERT(fastResult == (!maxScopeOffset ? 0 : maxScopeOffset.offset() + 1));
+
+        return fastResult;
+    }
+
+    ScopeOffset nextScopeOffset() const
+    {
+        return ScopeOffset(scopeSize());
+    }
+
+    ScopeOffset takeNextScopeOffset(const ConcurrentJITLocker&)
+    {
+        ScopeOffset result = nextScopeOffset();
+        m_maxScopeOffset = result;
+        return result;
+    }
+
+    ScopeOffset takeNextScopeOffset()
+    {
+        ConcurrentJITLocker locker(m_lock);
+        return takeNextScopeOffset(locker);
+    }
+
+    void add(const ConcurrentJITLocker&, UniquedStringImpl* key, const SymbolTableEntry& entry)
+    {
+        RELEASE_ASSERT(!m_localToEntry);
+        didUseVarOffset(entry.varOffset());
+        Map::AddResult result = m_map.add(key, entry);
+        ASSERT_UNUSED(result, result.isNewEntry);
+    }
+
+    void add(UniquedStringImpl* key, const SymbolTableEntry& entry)
+    {
+        ConcurrentJITLocker locker(m_lock);
+        add(locker, key, entry);
+    }
+
+    void set(const ConcurrentJITLocker&, UniquedStringImpl* key, const SymbolTableEntry& entry)
+    {
+        RELEASE_ASSERT(!m_localToEntry);
+        didUseVarOffset(entry.varOffset());
+        m_map.set(key, entry);
+    }
+
+    void set(UniquedStringImpl* key, const SymbolTableEntry& entry)
+    {
+        ConcurrentJITLocker locker(m_lock);
+        set(locker, key, entry);
+    }
+
+    bool contains(const ConcurrentJITLocker&, UniquedStringImpl* key)
+    {
+        return m_map.contains(key);
+    }
+
+    bool contains(UniquedStringImpl* key)
+    {
+        ConcurrentJITLocker locker(m_lock);
+        return contains(locker, key);
+    }
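For example, the lock-scoped iteration pattern the accessors above require looks roughly like this (illustrative only; assumes a `symbolTable` pointer inside JSC code):

    {
        ConcurrentJITLocker locker(symbolTable->m_lock);
        for (auto iter = symbolTable->begin(locker); iter != symbolTable->end(locker); ++iter) {
            UniquedStringImpl* name = iter->key.get();
            VarOffset offset = iter->value.varOffset();
            // ... use name and offset only while the lock is still held ...
        }
    } // the iterators must not be used past this point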
+
+    // The principle behind ScopedArgumentsTable modifications is that we will create one and
+    // leave it unlocked - thereby allowing in-place changes - until someone asks for a pointer to
+    // the table. Then, we will lock it. Then both our future changes and their future changes
+    // will first have to make a copy. This discipline means that usually when we create a
+    // ScopedArguments object, we don't have to make a copy of the ScopedArgumentsTable - instead
+    // we just take a reference to one that we already have.
+
+    uint32_t argumentsLength() const
+    {
+        if (!m_arguments)
+            return 0;
+        return m_arguments->length();
+    }
+
+    void setArgumentsLength(VM& vm, uint32_t length)
+    {
+        if (UNLIKELY(!m_arguments))
+            m_arguments.set(vm, this, ScopedArgumentsTable::create(vm));
+        m_arguments.set(vm, this, m_arguments->setLength(vm, length));
+    }
+
+    ScopeOffset argumentOffset(uint32_t i) const
+    {
+        ASSERT_WITH_SECURITY_IMPLICATION(m_arguments);
+        return m_arguments->get(i);
+    }
+
+    void setArgumentOffset(VM& vm, uint32_t i, ScopeOffset offset)
+    {
+        ASSERT_WITH_SECURITY_IMPLICATION(m_arguments);
+        m_arguments.set(vm, this, m_arguments->set(vm, i, offset));
+    }
+
+    ScopedArgumentsTable* arguments() const
+    {
+        if (!m_arguments)
+            return nullptr;
+        m_arguments->lock();
+        return m_arguments.get();
+    }
+
+    const LocalToEntryVec& localToEntry(const ConcurrentJITLocker&);
+    SymbolTableEntry* entryFor(const ConcurrentJITLocker&, ScopeOffset);
+
+    GlobalVariableID uniqueIDForVariable(const ConcurrentJITLocker&, UniquedStringImpl* key, VM&);
+    GlobalVariableID uniqueIDForOffset(const ConcurrentJITLocker&, VarOffset, VM&);
+    RefPtr<TypeSet> globalTypeSetForOffset(const ConcurrentJITLocker&, VarOffset, VM&);
+    RefPtr<TypeSet> globalTypeSetForVariable(const ConcurrentJITLocker&, UniquedStringImpl* key, VM&);
+
+    bool usesNonStrictEval() { return m_usesNonStrictEval; }
+    void setUsesNonStrictEval(bool usesNonStrictEval) { m_usesNonStrictEval = usesNonStrictEval; }
+
+    SymbolTable* cloneScopePart(VM&);
+
+    void prepareForTypeProfiling(const ConcurrentJITLocker&);
+
+    InferredValue* singletonScope() { return m_singletonScope.get(); }
+
+    static void visitChildren(JSCell*, SlotVisitor&);
+
+    DECLARE_EXPORT_INFO;
+
+private:
+    JS_EXPORT_PRIVATE SymbolTable(VM&);
+    ~SymbolTable();
+
+    JS_EXPORT_PRIVATE void finishCreation(VM&);
+
+    Map m_map;
+    ScopeOffset m_maxScopeOffset;
+
+    struct TypeProfilingRareData {
+        UniqueIDMap m_uniqueIDMap;
+        OffsetToVariableMap m_offsetToVariableMap;
+        UniqueTypeSetMap m_uniqueTypeSetMap;
     };
+    std::unique_ptr<TypeProfilingRareData> m_typeProfilingRareData;
+
+    bool m_usesNonStrictEval;
+    WriteBarrier<ScopedArgumentsTable> m_arguments;
+    WriteBarrier<InferredValue> m_singletonScope;
+
+    std::unique_ptr<LocalToEntryVec> m_localToEntry;
+
+public:
+    mutable ConcurrentJITLock m_lock;
+};
+
 } // namespace JSC
 
 #endif // SymbolTable_h
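As a usage sketch of the copy-on-lock discipline described in the ScopedArgumentsTable comment above (illustrative only, not part of this patch; `vm` and `symbolTable` are assumed to exist in surrounding JSC code):

    // While nobody has asked for the table yet, edits happen in place.
    symbolTable->setArgumentsLength(vm, 2);
    symbolTable->setArgumentOffset(vm, 0, ScopeOffset(0));
    symbolTable->setArgumentOffset(vm, 1, ScopeOffset(1));

    // arguments() locks the table before handing it out, so a ScopedArguments
    // object can simply take a reference to it.
    ScopedArgumentsTable* shared = symbolTable->arguments();

    // Any later edit now copies the table first instead of mutating the one
    // that 'shared' still points at.
    symbolTable->setArgumentOffset(vm, 1, ScopeOffset(5));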