/*
 * Copyright (C) 2007, 2008, 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef SymbolTable_h
#define SymbolTable_h

#include "ConcurrentJITLock.h"
#include "ConstantMode.h"
#include "InferredValue.h"
#include "JSObject.h"
#include "ScopedArgumentsTable.h"
#include "TypeLocation.h"
#include "VarOffset.h"
#include "Watchpoint.h"
#include <memory>
#include <wtf/HashTraits.h>
#include <wtf/text/UniquedStringImpl.h>

namespace JSC {

class SymbolTable;

static ALWAYS_INLINE int missingSymbolMarker() { return std::numeric_limits<int>::max(); }

// The bit twiddling in this class assumes that every register index is a
// reasonably small positive or negative number, and therefore has its high
// four bits all set or all unset.
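//
// For illustration: packing shifts the index left by FlagBits (6) and
// unpacking shifts it back, so an index is representable only if the shift
// round-trips, e.g.
//
//     (static_cast<intptr_t>(-3) << 6) >> 6 == -3   // high bits all set
//
// isValidVarOffset() below checks exactly this property.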

// In addition to implementing semantics-mandated variable attributes and
// implementation-mandated variable indexing, this class also implements
// watchpoints to be used for JIT optimizations. Because watchpoints are
// meant to be relatively rare, this class optimizes heavily for the case
// that they are not being used. To that end, this class uses the thin-fat
// idiom: either it is thin, in which case it contains an in-place encoded
// word that consists of attributes, the index, and a bit saying that it is
// thin; or it is fat, in which case it contains a pointer to a malloc'd
// data structure and a bit saying that it is fat. The malloc'd data
// structure will be malloced a second time upon copy, to preserve the
// property that in-place edits to SymbolTableEntry do not manifest in any
// copies. However, the malloc'd FatEntry data structure contains a ref-
// counted pointer to a shared WatchpointSet. Thus, in-place edits of the
// WatchpointSet will manifest in all copies. Here's a picture:
//
// SymbolTableEntry --> FatEntry --> WatchpointSet
//
// If you make a copy of a SymbolTableEntry, you will have:
//
// original: SymbolTableEntry --> FatEntry --> WatchpointSet
// copy:     SymbolTableEntry --> FatEntry -----^
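//
// A minimal sketch of the resulting copy semantics (illustrative; 'original'
// is a hypothetical fat entry):
//
//     SymbolTableEntry copy = original;      // copySlow() mallocs a fresh FatEntry
//     copy.setAttributes(ReadOnly);          // in-place edit: visible in 'copy' only
//     copy.watchpointSet()->invalidate(...); // shared WatchpointSet: visible to both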
struct SymbolTableEntry {
private:
    static VarOffset varOffsetFromBits(intptr_t bits)
    {
        VarKind kind;
        intptr_t kindBits = bits & KindBitsMask;
        if (kindBits <= UnwatchableScopeKindBits)
            kind = VarKind::Scope;
        else if (kindBits == StackKindBits)
            kind = VarKind::Stack;
        else
            kind = VarKind::DirectArgument;
        return VarOffset::assemble(kind, static_cast<int>(bits >> FlagBits));
    }

    static ScopeOffset scopeOffsetFromBits(intptr_t bits)
    {
        ASSERT((bits & KindBitsMask) <= UnwatchableScopeKindBits);
        return ScopeOffset(static_cast<int>(bits >> FlagBits));
    }

public:
    // Use the SymbolTableEntry::Fast class, either via implicit cast or by calling
    // getFast(), when you (1) only care about isNull(), varOffset(), and isReadOnly(),
    // and (2) you are in a hot path where you need to minimize the number of times
    // that you branch on isFat() when getting the bits().
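    //
    // A hot-path call site might look like this (illustrative; 'entry' and
    // 'use' are hypothetical):
    //
    //     SymbolTableEntry::Fast fast = entry.getFast(); // one isFat() branch
    //     if (fast.isNull() || fast.isReadOnly())
    //         return;
    //     use(fast.varOffset());                         // no further fat checks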
    class Fast {
    public:
        Fast()
            : m_bits(SlimFlag)
        {
        }

        ALWAYS_INLINE Fast(const SymbolTableEntry& entry)
            : m_bits(entry.bits())
        {
        }

        bool isNull() const
        {
            return !(m_bits & ~SlimFlag);
        }

        VarOffset varOffset() const
        {
            return varOffsetFromBits(m_bits);
        }

        // Asserts if the offset is anything but a scope offset. This structures the assertions
        // in a way that may result in better code, even in release, than doing
        // varOffset().scopeOffset().
        ScopeOffset scopeOffset() const
        {
            return scopeOffsetFromBits(m_bits);
        }

        bool isReadOnly() const
        {
            return m_bits & ReadOnlyFlag;
        }

        bool isDontEnum() const
        {
            return m_bits & DontEnumFlag;
        }

        unsigned getAttributes() const
        {
            unsigned attributes = 0;
            if (isReadOnly())
                attributes |= ReadOnly;
            if (isDontEnum())
                attributes |= DontEnum;
            return attributes;
        }

        bool isFat() const
        {
            return !(m_bits & SlimFlag);
        }

    private:
        friend struct SymbolTableEntry;

        intptr_t m_bits;
    };

    SymbolTableEntry()
        : m_bits(SlimFlag)
    {
    }

    SymbolTableEntry(VarOffset offset)
        : m_bits(SlimFlag)
    {
        ASSERT(isValidVarOffset(offset));
        pack(offset, true, false, false);
    }

    SymbolTableEntry(VarOffset offset, unsigned attributes)
        : m_bits(SlimFlag)
    {
        ASSERT(isValidVarOffset(offset));
        pack(offset, true, attributes & ReadOnly, attributes & DontEnum);
    }

    ~SymbolTableEntry()
    {
        freeFatEntry();
    }

    SymbolTableEntry(const SymbolTableEntry& other)
        : m_bits(SlimFlag)
    {
        *this = other;
    }

    SymbolTableEntry& operator=(const SymbolTableEntry& other)
    {
        if (UNLIKELY(other.isFat()))
            return copySlow(other);
        freeFatEntry();
        m_bits = other.m_bits;
        return *this;
    }

    bool isNull() const
    {
        return !(bits() & ~SlimFlag);
    }

    VarOffset varOffset() const
    {
        return varOffsetFromBits(bits());
    }

    bool isWatchable() const
    {
        return (m_bits & KindBitsMask) == ScopeKindBits;
    }

    // Asserts if the offset is anything but a scope offset. This structures the assertions
    // in a way that may result in better code, even in release, than doing
    // varOffset().scopeOffset().
    ScopeOffset scopeOffset() const
    {
        return scopeOffsetFromBits(bits());
    }

    ALWAYS_INLINE Fast getFast() const
    {
        return Fast(*this);
    }

    ALWAYS_INLINE Fast getFast(bool& wasFat) const
    {
        Fast result;
        wasFat = isFat();
        if (wasFat)
            result.m_bits = fatEntry()->m_bits | SlimFlag;
        else
            result.m_bits = m_bits;
        return result;
    }

    unsigned getAttributes() const
    {
        return getFast().getAttributes();
    }

    void setAttributes(unsigned attributes)
    {
        pack(varOffset(), isWatchable(), attributes & ReadOnly, attributes & DontEnum);
    }

    bool isReadOnly() const
    {
        return bits() & ReadOnlyFlag;
    }

    ConstantMode constantMode() const
    {
        return modeForIsConstant(isReadOnly());
    }

    bool isDontEnum() const
    {
        return bits() & DontEnumFlag;
    }

    void disableWatching()
    {
        if (WatchpointSet* set = watchpointSet())
            set->invalidate("Disabling watching in symbol table");
        if (varOffset().isScope())
            pack(varOffset(), false, isReadOnly(), isDontEnum());
    }

    void prepareToWatch();

    void addWatchpoint(Watchpoint*);

    // This watchpoint set is initialized clear, and goes through the following state transitions:
    //
    // First write to this var, in any scope that has this symbol table: Clear->IsWatched.
    //
    // Second write to this var, in any scope that has this symbol table: IsWatched->IsInvalidated.
    //
    // We ensure that we touch the set (i.e. trigger its state transition) after we do the write. This
    // means that if you're in the compiler thread, and you:
    //
    // 1) Observe that the set IsWatched and commit to adding your watchpoint.
    // 2) Load a value from any scope that has this watchpoint set.
    //
    // Then you can be sure that that value is either going to be the correct value for that var forever,
    // or the watchpoint set will invalidate and you'll get fired.
    //
    // It's possible to write a program that first creates multiple scopes with the same var, and then
    // initializes that var in just one of them. This means that a compilation could constant-fold to one
    // of the scopes that still has an undefined value for this variable. That's fine, because at that
    // point any write to any of the instances of that variable would fire the watchpoint.
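    //
    // Sketched as code, that compiler-thread protocol reads roughly like this
    // (illustrative pseudocode; 'bail' and 'loadFromScope' are hypothetical):
    //
    //     WatchpointSet* set = entry.watchpointSet();
    //     if (!set || set->state() != IsWatched)
    //         bail();                                         // step 1: commit only while IsWatched
    //     JSValue value = loadFromScope(entry.scopeOffset()); // step 2: the load
    //     // 'value' is now either correct forever, or the set will invalidate.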
    WatchpointSet* watchpointSet()
    {
        if (!isFat())
            return nullptr;
        return fatEntry()->m_watchpoints.get();
    }

private:
    static const intptr_t SlimFlag = 0x1;
    static const intptr_t ReadOnlyFlag = 0x2;
    static const intptr_t DontEnumFlag = 0x4;
    static const intptr_t NotNullFlag = 0x8;
    static const intptr_t KindBitsMask = 0x30;
    static const intptr_t ScopeKindBits = 0x00;
    static const intptr_t UnwatchableScopeKindBits = 0x10;
    static const intptr_t StackKindBits = 0x20;
    static const intptr_t DirectArgumentKindBits = 0x30;
    static const intptr_t FlagBits = 6;
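
    // Worked example of the encoding above: a watchable scope variable at
    // ScopeOffset(5) packs as
    //
    //     (5 << FlagBits) | NotNullFlag | SlimFlag | ScopeKindBits
    //         == 0x140    | 0x8         | 0x1      | 0x00
    //         == 0x149
    //
    // and varOffsetFromBits(0x149) recovers kind == VarKind::Scope
    // (0x149 & KindBitsMask == ScopeKindBits) and offset == 0x149 >> 6 == 5.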

    class FatEntry {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        FatEntry(intptr_t bits)
            : m_bits(bits & ~SlimFlag)
        {
        }

        intptr_t m_bits; // always has the slim bit clear (i.e. reads as fat) and otherwise exactly matches what the bits would have been if this entry weren't fat.

        RefPtr<WatchpointSet> m_watchpoints;
    };

    SymbolTableEntry& copySlow(const SymbolTableEntry&);
    JS_EXPORT_PRIVATE void notifyWriteSlow(VM&, JSValue, const FireDetail&);

    bool isFat() const
    {
        return !(m_bits & SlimFlag);
    }

    const FatEntry* fatEntry() const
    {
        ASSERT(isFat());
        return bitwise_cast<const FatEntry*>(m_bits);
    }

    FatEntry* fatEntry()
    {
        ASSERT(isFat());
        return bitwise_cast<FatEntry*>(m_bits);
    }

    FatEntry* inflate()
    {
        if (LIKELY(isFat()))
            return fatEntry();
        return inflateSlow();
    }

    FatEntry* inflateSlow();

    ALWAYS_INLINE intptr_t bits() const
    {
        if (isFat())
            return fatEntry()->m_bits;
        return m_bits;
    }

    ALWAYS_INLINE intptr_t& bits()
    {
        if (isFat())
            return fatEntry()->m_bits;
        return m_bits;
    }

    void freeFatEntry()
    {
        if (LIKELY(!isFat()))
            return;
        freeFatEntrySlow();
    }

    JS_EXPORT_PRIVATE void freeFatEntrySlow();

    void pack(VarOffset offset, bool isWatchable, bool readOnly, bool dontEnum)
    {
        ASSERT(!isFat());
        intptr_t& bitsRef = bits();
        bitsRef =
            (static_cast<intptr_t>(offset.rawOffset()) << FlagBits) | NotNullFlag | SlimFlag;
        if (readOnly)
            bitsRef |= ReadOnlyFlag;
        if (dontEnum)
            bitsRef |= DontEnumFlag;
        switch (offset.kind()) {
        case VarKind::Scope:
            if (isWatchable)
                bitsRef |= ScopeKindBits;
            else
                bitsRef |= UnwatchableScopeKindBits;
            break;
        case VarKind::Stack:
            bitsRef |= StackKindBits;
            break;
        case VarKind::DirectArgument:
            bitsRef |= DirectArgumentKindBits;
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    static bool isValidVarOffset(VarOffset offset)
    {
        return ((static_cast<intptr_t>(offset.rawOffset()) << FlagBits) >> FlagBits) == static_cast<intptr_t>(offset.rawOffset());
    }

    intptr_t m_bits;
};

struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> {
    static const bool needsDestruction = true;
};

class SymbolTable final : public JSCell {
public:
    typedef JSCell Base;
    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;

    typedef HashMap<RefPtr<UniquedStringImpl>, SymbolTableEntry, IdentifierRepHash, HashTraits<RefPtr<UniquedStringImpl>>, SymbolTableIndexHashTraits> Map;
    typedef HashMap<RefPtr<UniquedStringImpl>, GlobalVariableID, IdentifierRepHash> UniqueIDMap;
    typedef HashMap<RefPtr<UniquedStringImpl>, RefPtr<TypeSet>, IdentifierRepHash> UniqueTypeSetMap;
    typedef HashMap<VarOffset, RefPtr<UniquedStringImpl>> OffsetToVariableMap;
    typedef Vector<SymbolTableEntry*> LocalToEntryVec;

    static SymbolTable* create(VM& vm)
    {
        SymbolTable* symbolTable = new (NotNull, allocateCell<SymbolTable>(vm.heap)) SymbolTable(vm);
        symbolTable->finishCreation(vm);
        return symbolTable;
    }

    static SymbolTable* createNameScopeTable(VM& vm, const Identifier& ident, unsigned attributes)
    {
        SymbolTable* result = create(vm);
        result->add(ident.impl(), SymbolTableEntry(VarOffset(ScopeOffset(0)), attributes));
        return result;
    }

    static const bool needsDestruction = true;
    static void destroy(JSCell*);

    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
    {
        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
    }

    // You must hold the lock until after you're done with the iterator.
    Map::iterator find(const ConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.find(key);
    }
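    //
    // For example (illustrative; 'table' and 'key' are hypothetical):
    //
    //     ConcurrentJITLocker locker(table->m_lock);
    //     auto iter = table->find(locker, key);
    //     if (iter != table->end(locker))
    //         ... use iter->value ...   // 'locker' must still be alive here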

    Map::iterator find(const GCSafeConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.find(key);
    }

    SymbolTableEntry get(const ConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.get(key);
    }

    SymbolTableEntry get(UniquedStringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return get(locker, key);
    }

    SymbolTableEntry inlineGet(const ConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.inlineGet(key);
    }

    SymbolTableEntry inlineGet(UniquedStringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return inlineGet(locker, key);
    }

    Map::iterator begin(const ConcurrentJITLocker&)
    {
        return m_map.begin();
    }

    Map::iterator end(const ConcurrentJITLocker&)
    {
        return m_map.end();
    }

    Map::iterator end(const GCSafeConcurrentJITLocker&)
    {
        return m_map.end();
    }

    size_t size(const ConcurrentJITLocker&) const
    {
        return m_map.size();
    }

    size_t size() const
    {
        ConcurrentJITLocker locker(m_lock);
        return size(locker);
    }

    ScopeOffset maxScopeOffset() const
    {
        return m_maxScopeOffset;
    }

    void didUseScopeOffset(ScopeOffset offset)
    {
        if (!m_maxScopeOffset || m_maxScopeOffset < offset)
            m_maxScopeOffset = offset;
    }

    void didUseVarOffset(VarOffset offset)
    {
        if (offset.isScope())
            didUseScopeOffset(offset.scopeOffset());
    }

    unsigned scopeSize() const
    {
        ScopeOffset maxScopeOffset = this->maxScopeOffset();

        // Do some calculation that relies on invalid scope offset plus one being zero.
        unsigned fastResult = maxScopeOffset.offsetUnchecked() + 1;

        // Assert that this works.
        ASSERT(fastResult == (!maxScopeOffset ? 0 : maxScopeOffset.offset() + 1));

        return fastResult;
    }

    ScopeOffset nextScopeOffset() const
    {
        return ScopeOffset(scopeSize());
    }

    ScopeOffset takeNextScopeOffset(const ConcurrentJITLocker&)
    {
        ScopeOffset result = nextScopeOffset();
        m_maxScopeOffset = result;
        return result;
    }

    ScopeOffset takeNextScopeOffset()
    {
        ConcurrentJITLocker locker(m_lock);
        return takeNextScopeOffset(locker);
    }
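    //
    // Typical use when laying out a new scope variable (illustrative; 'table'
    // and 'key' are hypothetical):
    //
    //     ConcurrentJITLocker locker(table->m_lock);
    //     ScopeOffset offset = table->takeNextScopeOffset(locker);
    //     table->add(locker, key, SymbolTableEntry(VarOffset(offset)));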

    void add(const ConcurrentJITLocker&, UniquedStringImpl* key, const SymbolTableEntry& entry)
    {
        RELEASE_ASSERT(!m_localToEntry);
        didUseVarOffset(entry.varOffset());
        Map::AddResult result = m_map.add(key, entry);
        ASSERT_UNUSED(result, result.isNewEntry);
    }

    void add(UniquedStringImpl* key, const SymbolTableEntry& entry)
    {
        ConcurrentJITLocker locker(m_lock);
        add(locker, key, entry);
    }

    void set(const ConcurrentJITLocker&, UniquedStringImpl* key, const SymbolTableEntry& entry)
    {
        RELEASE_ASSERT(!m_localToEntry);
        didUseVarOffset(entry.varOffset());
        m_map.set(key, entry);
    }

    void set(UniquedStringImpl* key, const SymbolTableEntry& entry)
    {
        ConcurrentJITLocker locker(m_lock);
        set(locker, key, entry);
    }

    bool contains(const ConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.contains(key);
    }

    bool contains(UniquedStringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return contains(locker, key);
    }

    // The principle behind ScopedArgumentsTable modifications is that we will create one and
    // leave it unlocked - thereby allowing in-place changes - until someone asks for a pointer to
    // the table. Then, we will lock it. Then both our future changes and their future changes
    // will first have to make a copy. This discipline means that usually when we create a
    // ScopedArguments object, we don't have to make a copy of the ScopedArgumentsTable - instead
    // we just take a reference to one that we already have.
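    //
    // Sketched (illustrative; the lock/copy bookkeeping lives in
    // ScopedArgumentsTable, not in this file):
    //
    //     table->setArgumentOffset(vm, 0, offsetA); // in place: table not yet shared
    //     ScopedArgumentsTable* shared = table->arguments(); // hands out a pointer, locks
    //     table->setArgumentOffset(vm, 0, offsetB); // copies first; 'shared' is unchanged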

    uint32_t argumentsLength() const
    {
        if (!m_arguments)
            return 0;
        return m_arguments->length();
    }

    void setArgumentsLength(VM& vm, uint32_t length)
    {
        if (UNLIKELY(!m_arguments))
            m_arguments.set(vm, this, ScopedArgumentsTable::create(vm));
        m_arguments.set(vm, this, m_arguments->setLength(vm, length));
    }

    ScopeOffset argumentOffset(uint32_t i) const
    {
        ASSERT_WITH_SECURITY_IMPLICATION(m_arguments);
        return m_arguments->get(i);
    }

    void setArgumentOffset(VM& vm, uint32_t i, ScopeOffset offset)
    {
        ASSERT_WITH_SECURITY_IMPLICATION(m_arguments);
        m_arguments.set(vm, this, m_arguments->set(vm, i, offset));
    }

    ScopedArgumentsTable* arguments() const
    {
        if (!m_arguments)
            return nullptr;
        m_arguments->lock();
        return m_arguments.get();
    }

    const LocalToEntryVec& localToEntry(const ConcurrentJITLocker&);
    SymbolTableEntry* entryFor(const ConcurrentJITLocker&, ScopeOffset);

    GlobalVariableID uniqueIDForVariable(const ConcurrentJITLocker&, UniquedStringImpl* key, VM&);
    GlobalVariableID uniqueIDForOffset(const ConcurrentJITLocker&, VarOffset, VM&);
    RefPtr<TypeSet> globalTypeSetForOffset(const ConcurrentJITLocker&, VarOffset, VM&);
    RefPtr<TypeSet> globalTypeSetForVariable(const ConcurrentJITLocker&, UniquedStringImpl* key, VM&);

    bool usesNonStrictEval() { return m_usesNonStrictEval; }
    void setUsesNonStrictEval(bool usesNonStrictEval) { m_usesNonStrictEval = usesNonStrictEval; }

    SymbolTable* cloneScopePart(VM&);

    void prepareForTypeProfiling(const ConcurrentJITLocker&);

    InferredValue* singletonScope() { return m_singletonScope.get(); }

    static void visitChildren(JSCell*, SlotVisitor&);

    DECLARE_EXPORT_INFO;

private:
    JS_EXPORT_PRIVATE SymbolTable(VM&);
    ~SymbolTable();

    JS_EXPORT_PRIVATE void finishCreation(VM&);

    Map m_map;

    ScopeOffset m_maxScopeOffset;

    struct TypeProfilingRareData {
        UniqueIDMap m_uniqueIDMap;
        OffsetToVariableMap m_offsetToVariableMap;
        UniqueTypeSetMap m_uniqueTypeSetMap;
    };
    std::unique_ptr<TypeProfilingRareData> m_typeProfilingRareData;

    bool m_usesNonStrictEval;

    WriteBarrier<ScopedArgumentsTable> m_arguments;
    WriteBarrier<InferredValue> m_singletonScope;

    std::unique_ptr<LocalToEntryVec> m_localToEntry;

public:
    mutable ConcurrentJITLock m_lock;
};

} // namespace JSC

#endif // SymbolTable_h