/*
 * Copyright (C) 2007, 2008, 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef SymbolTable_h
#define SymbolTable_h

#include "ConcurrentJITLock.h"
#include "ConstantMode.h"
#include "InferredValue.h"
#include "JSObject.h"
#include "ScopedArgumentsTable.h"
#include "TypeLocation.h"
#include "VarOffset.h"
#include "Watchpoint.h"
#include <memory>
#include <wtf/HashTraits.h>
#include <wtf/text/UniquedStringImpl.h>

namespace JSC {

class SymbolTable;

static ALWAYS_INLINE int missingSymbolMarker() { return std::numeric_limits<int>::max(); }

// The bit twiddling in this class assumes that every register index is a
// reasonably small positive or negative number, and therefore has its high
// four bits all set or all unset.

// In addition to implementing semantics-mandated variable attributes and
// implementation-mandated variable indexing, this class also implements
// watchpoints to be used for JIT optimizations. Because watchpoints are
// meant to be relatively rare, this class optimizes heavily for the case
// that they are not being used. To that end, this class uses the thin-fat
// idiom: either it is thin, in which case it contains an in-place encoded
// word that consists of attributes, the index, and a bit saying that it is
// thin; or it is fat, in which case it contains a pointer to a malloc'd
// data structure and a bit saying that it is fat. The malloc'd data
// structure will be malloc'd a second time upon copy, to preserve the
// property that in-place edits to SymbolTableEntry do not manifest in any
// copies. However, the malloc'd FatEntry data structure contains a ref-
// counted pointer to a shared WatchpointSet. Thus, in-place edits of the
// WatchpointSet will manifest in all copies. Here's a picture:
//
// SymbolTableEntry --> FatEntry --> WatchpointSet
//
// If you make a copy of a SymbolTableEntry, you will have:
//
// original: SymbolTableEntry --> FatEntry --> WatchpointSet
// copy:     SymbolTableEntry --> FatEntry -----^
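//
// A hypothetical illustration of that sharing (the identifiers here are for
// exposition only, not part of this header):
//
//     SymbolTableEntry original(VarOffset(ScopeOffset(0)));
//     original.prepareToWatch();        // inflates the entry to a FatEntry
//     SymbolTableEntry copy = original; // copySlow() mallocs a fresh FatEntry...
//     ASSERT(copy.watchpointSet() == original.watchpointSet()); // ...but shares the set
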
struct SymbolTableEntry {
private:
    static VarOffset varOffsetFromBits(intptr_t bits)
    {
        VarKind kind;
        intptr_t kindBits = bits & KindBitsMask;
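        // Note that both ScopeKindBits (0x00) and UnwatchableScopeKindBits (0x10)
        // decode to VarKind::Scope; watchability is a property of how the bits were
        // packed, not of the decoded kind.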
        if (kindBits <= UnwatchableScopeKindBits)
            kind = VarKind::Scope;
        else if (kindBits == StackKindBits)
            kind = VarKind::Stack;
        else
            kind = VarKind::DirectArgument;
        return VarOffset::assemble(kind, static_cast<int>(bits >> FlagBits));
    }
    
    static ScopeOffset scopeOffsetFromBits(intptr_t bits)
    {
        ASSERT((bits & KindBitsMask) <= UnwatchableScopeKindBits);
        return ScopeOffset(static_cast<int>(bits >> FlagBits));
    }

public:
    
    // Use the SymbolTableEntry::Fast class, either via implicit cast or by calling
    // getFast(), when you (1) only care about isNull(), varOffset(), and isReadOnly(),
    // and (2) are in a hot path where you need to minimize the number of times
    // you branch on isFat() when getting the bits().
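    //
    // For example, a hot lookup might do (a sketch; 'table', 'locker', and 'key' are
    // placeholders):
    //
    //     SymbolTableEntry::Fast entry = table->get(locker, key);
    //     if (!entry.isNull() && !entry.isReadOnly())
    //         doSomethingWith(entry.varOffset());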
    class Fast {
    public:
        Fast()
            : m_bits(SlimFlag)
        {
        }
        
        ALWAYS_INLINE Fast(const SymbolTableEntry& entry)
            : m_bits(entry.bits())
        {
        }

        bool isNull() const
        {
            return !(m_bits & ~SlimFlag);
        }

        VarOffset varOffset() const
        {
            return varOffsetFromBits(m_bits);
        }
        
        // Asserts if the offset is anything but a scope offset. This structures the assertions
        // in a way that may result in better code, even in release, than doing
        // varOffset().scopeOffset().
        ScopeOffset scopeOffset() const
        {
            return scopeOffsetFromBits(m_bits);
        }
        
        bool isReadOnly() const
        {
            return m_bits & ReadOnlyFlag;
        }
        
        bool isDontEnum() const
        {
            return m_bits & DontEnumFlag;
        }
        
        unsigned getAttributes() const
        {
            unsigned attributes = 0;
            if (isReadOnly())
                attributes |= ReadOnly;
            if (isDontEnum())
                attributes |= DontEnum;
            return attributes;
        }

        bool isFat() const
        {
            return !(m_bits & SlimFlag);
        }
        
    private:
        friend struct SymbolTableEntry;
        intptr_t m_bits;
    };

    SymbolTableEntry()
        : m_bits(SlimFlag)
    {
    }

    SymbolTableEntry(VarOffset offset)
        : m_bits(SlimFlag)
    {
        ASSERT(isValidVarOffset(offset));
        pack(offset, true, false, false);
    }

    SymbolTableEntry(VarOffset offset, unsigned attributes)
        : m_bits(SlimFlag)
    {
        ASSERT(isValidVarOffset(offset));
        pack(offset, true, attributes & ReadOnly, attributes & DontEnum);
    }
    
    ~SymbolTableEntry()
    {
        freeFatEntry();
    }
    
    SymbolTableEntry(const SymbolTableEntry& other)
        : m_bits(SlimFlag)
    {
        *this = other;
    }
    
    SymbolTableEntry& operator=(const SymbolTableEntry& other)
    {
        if (UNLIKELY(other.isFat()))
            return copySlow(other);
        freeFatEntry();
        m_bits = other.m_bits;
        return *this;
    }
    
    bool isNull() const
    {
        return !(bits() & ~SlimFlag);
    }

    VarOffset varOffset() const
    {
        return varOffsetFromBits(bits());
    }
    
    bool isWatchable() const
    {
        return (m_bits & KindBitsMask) == ScopeKindBits;
    }
    
    // Asserts if the offset is anything but a scope offset. This structures the assertions
    // in a way that may result in better code, even in release, than doing
    // varOffset().scopeOffset().
    ScopeOffset scopeOffset() const
    {
        return scopeOffsetFromBits(bits());
    }
    
    ALWAYS_INLINE Fast getFast() const
    {
        return Fast(*this);
    }
    
    ALWAYS_INLINE Fast getFast(bool& wasFat) const
    {
        Fast result;
        wasFat = isFat();
        if (wasFat)
            result.m_bits = fatEntry()->m_bits | SlimFlag;
        else
            result.m_bits = m_bits;
        return result;
    }

    unsigned getAttributes() const
    {
        return getFast().getAttributes();
    }

    void setAttributes(unsigned attributes)
    {
        pack(varOffset(), isWatchable(), attributes & ReadOnly, attributes & DontEnum);
    }

    bool isReadOnly() const
    {
        return bits() & ReadOnlyFlag;
    }
    
    ConstantMode constantMode() const
    {
        return modeForIsConstant(isReadOnly());
    }
    
    bool isDontEnum() const
    {
        return bits() & DontEnumFlag;
    }
    
    void disableWatching()
    {
        if (WatchpointSet* set = watchpointSet())
            set->invalidate("Disabling watching in symbol table");
        if (varOffset().isScope())
            pack(varOffset(), false, isReadOnly(), isDontEnum());
    }

    void prepareToWatch();
    
    void addWatchpoint(Watchpoint*);
    
    // This watchpoint set is initialized clear, and goes through the following state transitions:
    //
    // First write to this var, in any scope that has this symbol table: Clear->IsWatched.
    //
    // Second write to this var, in any scope that has this symbol table: IsWatched->IsInvalidated.
    //
    // We ensure that we touch the set (i.e. trigger its state transition) after we do the write.
    // This means that if you're on the compiler thread and you:
    //
    // 1) Observe that the set IsWatched and commit to adding your watchpoint.
    // 2) Load a value from any scope that has this watchpoint set.
    //
    // Then you can be sure that the value is either going to be the correct value for that var
    // forever, or the watchpoint set will be invalidated and your watchpoint will fire.
    //
    // It's possible to write a program that first creates multiple scopes with the same var, and then
    // initializes that var in just one of them. This means that a compilation could constant-fold to one
    // of the scopes that still has an undefined value for this variable. That's fine, because at that
    // point any write to any of the instances of that variable would fire the watchpoint.
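    //
    // A sketch of that compiler-thread discipline (illustrative pseudocode only;
    // loadFromScope() is a placeholder and 'set' is this entry's WatchpointSet):
    //
    //     if (set->state() == IsWatched) {      // (1) observe IsWatched, commit to watching
    //         JSValue value = loadFromScope();  // (2) load after observing the state
    //         // 'value' is correct for this var forever, unless the set gets
    //         // invalidated, in which case the watchpoint fires and the compiled
    //         // code is thrown away.
    //     }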
    WatchpointSet* watchpointSet()
    {
        if (!isFat())
            return 0;
        return fatEntry()->m_watchpoints.get();
    }
    
private:
    static const intptr_t SlimFlag = 0x1;
    static const intptr_t ReadOnlyFlag = 0x2;
    static const intptr_t DontEnumFlag = 0x4;
    static const intptr_t NotNullFlag = 0x8;
    static const intptr_t KindBitsMask = 0x30;
    static const intptr_t ScopeKindBits = 0x00;
    static const intptr_t UnwatchableScopeKindBits = 0x10;
    static const intptr_t StackKindBits = 0x20;
    static const intptr_t DirectArgumentKindBits = 0x30;
    static const intptr_t FlagBits = 6;
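    
    // Slim-entry bit layout implied by the constants above:
    //
    //     bit 0     SlimFlag (set = slim; clear = m_bits is a FatEntry pointer)
    //     bit 1     ReadOnlyFlag
    //     bit 2     DontEnumFlag
    //     bit 3     NotNullFlag
    //     bits 4-5  VarKind (watchable scope / unwatchable scope / stack / direct argument)
    //     bits 6+   the signed raw offset, stored as (offset.rawOffset() << FlagBits)
    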
    class FatEntry {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        FatEntry(intptr_t bits)
            : m_bits(bits & ~SlimFlag)
        {
        }
        
        intptr_t m_bits; // Always has SlimFlag clear, and otherwise exactly matches what
                         // the bits would have been if this entry weren't fat.
        
        RefPtr<WatchpointSet> m_watchpoints;
    };
    
    SymbolTableEntry& copySlow(const SymbolTableEntry&);
    JS_EXPORT_PRIVATE void notifyWriteSlow(VM&, JSValue, const FireDetail&);

    bool isFat() const
    {
        return !(m_bits & SlimFlag);
    }
    
    const FatEntry* fatEntry() const
    {
        ASSERT(isFat());
        return bitwise_cast<const FatEntry*>(m_bits);
    }
    
    FatEntry* fatEntry()
    {
        ASSERT(isFat());
        return bitwise_cast<FatEntry*>(m_bits);
    }
    
    FatEntry* inflate()
    {
        if (LIKELY(isFat()))
            return fatEntry();
        return inflateSlow();
    }
    
    FatEntry* inflateSlow();
    
    ALWAYS_INLINE intptr_t bits() const
    {
        if (isFat())
            return fatEntry()->m_bits;
        return m_bits;
    }
    
    ALWAYS_INLINE intptr_t& bits()
    {
        if (isFat())
            return fatEntry()->m_bits;
        return m_bits;
    }
    
    void freeFatEntry()
    {
        if (LIKELY(!isFat()))
            return;
        freeFatEntrySlow();
    }
    
    JS_EXPORT_PRIVATE void freeFatEntrySlow();

    void pack(VarOffset offset, bool isWatchable, bool readOnly, bool dontEnum)
    {
        ASSERT(!isFat());
        intptr_t& bitsRef = bits();
        bitsRef =
            (static_cast<intptr_t>(offset.rawOffset()) << FlagBits) | NotNullFlag | SlimFlag;
        if (readOnly)
            bitsRef |= ReadOnlyFlag;
        if (dontEnum)
            bitsRef |= DontEnumFlag;
        switch (offset.kind()) {
        case VarKind::Scope:
            if (isWatchable)
                bitsRef |= ScopeKindBits;
            else
                bitsRef |= UnwatchableScopeKindBits;
            break;
        case VarKind::Stack:
            bitsRef |= StackKindBits;
            break;
        case VarKind::DirectArgument:
            bitsRef |= DirectArgumentKindBits;
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    static bool isValidVarOffset(VarOffset offset)
    {
        return ((static_cast<intptr_t>(offset.rawOffset()) << FlagBits) >> FlagBits) == static_cast<intptr_t>(offset.rawOffset());
    }

    intptr_t m_bits;
};

struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> {
    static const bool needsDestruction = true;
};

class SymbolTable final : public JSCell {
public:
    typedef JSCell Base;
    static const unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal;

    typedef HashMap<RefPtr<UniquedStringImpl>, SymbolTableEntry, IdentifierRepHash, HashTraits<RefPtr<UniquedStringImpl>>, SymbolTableIndexHashTraits> Map;
    typedef HashMap<RefPtr<UniquedStringImpl>, GlobalVariableID, IdentifierRepHash> UniqueIDMap;
    typedef HashMap<RefPtr<UniquedStringImpl>, RefPtr<TypeSet>, IdentifierRepHash> UniqueTypeSetMap;
    typedef HashMap<VarOffset, RefPtr<UniquedStringImpl>> OffsetToVariableMap;
    typedef Vector<SymbolTableEntry*> LocalToEntryVec;

    static SymbolTable* create(VM& vm)
    {
        SymbolTable* symbolTable = new (NotNull, allocateCell<SymbolTable>(vm.heap)) SymbolTable(vm);
        symbolTable->finishCreation(vm);
        return symbolTable;
    }
    
    static SymbolTable* createNameScopeTable(VM& vm, const Identifier& ident, unsigned attributes)
    {
        SymbolTable* result = create(vm);
        result->add(ident.impl(), SymbolTableEntry(VarOffset(ScopeOffset(0)), attributes));
        return result;
    }
    
    static const bool needsDestruction = true;
    static void destroy(JSCell*);

    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
    {
        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
    }

    // You must hold the lock until after you're done with the iterator.
    Map::iterator find(const ConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.find(key);
    }
    
    Map::iterator find(const GCSafeConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.find(key);
    }
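    //
    // For instance (a sketch; 'table' and 'key' are placeholders):
    //
    //     ConcurrentJITLocker locker(table->m_lock);
    //     auto iter = table->find(locker, key);
    //     if (iter != table->end(locker))
    //         /* use iter->value while still holding the lock */;
    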
    SymbolTableEntry get(const ConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.get(key);
    }
    
    SymbolTableEntry get(UniquedStringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return get(locker, key);
    }
    
    SymbolTableEntry inlineGet(const ConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.inlineGet(key);
    }
    
    SymbolTableEntry inlineGet(UniquedStringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return inlineGet(locker, key);
    }
    
    Map::iterator begin(const ConcurrentJITLocker&)
    {
        return m_map.begin();
    }
    
    Map::iterator end(const ConcurrentJITLocker&)
    {
        return m_map.end();
    }
    
    Map::iterator end(const GCSafeConcurrentJITLocker&)
    {
        return m_map.end();
    }
    
    size_t size(const ConcurrentJITLocker&) const
    {
        return m_map.size();
    }
    
    size_t size() const
    {
        ConcurrentJITLocker locker(m_lock);
        return size(locker);
    }
    
    ScopeOffset maxScopeOffset() const
    {
        return m_maxScopeOffset;
    }
    
    void didUseScopeOffset(ScopeOffset offset)
    {
        if (!m_maxScopeOffset || m_maxScopeOffset < offset)
            m_maxScopeOffset = offset;
    }
    
    void didUseVarOffset(VarOffset offset)
    {
        if (offset.isScope())
            didUseScopeOffset(offset.scopeOffset());
    }
    
    unsigned scopeSize() const
    {
        ScopeOffset maxScopeOffset = this->maxScopeOffset();
        
        // Do a calculation that relies on an invalid scope offset plus one being zero.
        unsigned fastResult = maxScopeOffset.offsetUnchecked() + 1;
        
        // Assert that this works.
        ASSERT(fastResult == (!maxScopeOffset ? 0 : maxScopeOffset.offset() + 1));
        
        return fastResult;
    }

    ScopeOffset nextScopeOffset() const
    {
        return ScopeOffset(scopeSize());
    }
    
    ScopeOffset takeNextScopeOffset(const ConcurrentJITLocker&)
    {
        ScopeOffset result = nextScopeOffset();
        m_maxScopeOffset = result;
        return result;
    }
    
    ScopeOffset takeNextScopeOffset()
    {
        ConcurrentJITLocker locker(m_lock);
        return takeNextScopeOffset(locker);
    }
    
    void add(const ConcurrentJITLocker&, UniquedStringImpl* key, const SymbolTableEntry& entry)
    {
        RELEASE_ASSERT(!m_localToEntry);
        didUseVarOffset(entry.varOffset());
        Map::AddResult result = m_map.add(key, entry);
        ASSERT_UNUSED(result, result.isNewEntry);
    }
    
    void add(UniquedStringImpl* key, const SymbolTableEntry& entry)
    {
        ConcurrentJITLocker locker(m_lock);
        add(locker, key, entry);
    }
    
    void set(const ConcurrentJITLocker&, UniquedStringImpl* key, const SymbolTableEntry& entry)
    {
        RELEASE_ASSERT(!m_localToEntry);
        didUseVarOffset(entry.varOffset());
        m_map.set(key, entry);
    }
    
    void set(UniquedStringImpl* key, const SymbolTableEntry& entry)
    {
        ConcurrentJITLocker locker(m_lock);
        set(locker, key, entry);
    }
    
    bool contains(const ConcurrentJITLocker&, UniquedStringImpl* key)
    {
        return m_map.contains(key);
    }
    
    bool contains(UniquedStringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return contains(locker, key);
    }
    
    // The principle behind ScopedArgumentsTable modifications is that we will create one and
    // leave it unlocked - thereby allowing in-place changes - until someone asks for a pointer
    // to the table. Then we will lock it, and from that point on both our future changes and
    // theirs must first make a copy. This discipline means that usually, when we create a
    // ScopedArguments object, we don't have to make a copy of the ScopedArgumentsTable -
    // instead we just take a reference to one that we already have.
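    //
    // An illustrative sequence (using the members declared below):
    //
    //     table->setArgumentOffset(vm, 0, offsetA); // table still unlocked: edits in place
    //     ScopedArgumentsTable* shared = table->arguments(); // locks the table
    //     table->setArgumentOffset(vm, 0, offsetB); // now ScopedArgumentsTable::set()
    //                                               // returns a fresh copy, which we adopt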
    uint32_t argumentsLength() const
    {
        if (!m_arguments)
            return 0;
        return m_arguments->length();
    }
    
    void setArgumentsLength(VM& vm, uint32_t length)
    {
        if (UNLIKELY(!m_arguments))
            m_arguments.set(vm, this, ScopedArgumentsTable::create(vm));
        m_arguments.set(vm, this, m_arguments->setLength(vm, length));
    }
    
    ScopeOffset argumentOffset(uint32_t i) const
    {
        ASSERT_WITH_SECURITY_IMPLICATION(m_arguments);
        return m_arguments->get(i);
    }
    
    void setArgumentOffset(VM& vm, uint32_t i, ScopeOffset offset)
    {
        ASSERT_WITH_SECURITY_IMPLICATION(m_arguments);
        m_arguments.set(vm, this, m_arguments->set(vm, i, offset));
    }
    
    ScopedArgumentsTable* arguments() const
    {
        if (!m_arguments)
            return nullptr;
        m_arguments->lock();
        return m_arguments.get();
    }
    
    const LocalToEntryVec& localToEntry(const ConcurrentJITLocker&);
    SymbolTableEntry* entryFor(const ConcurrentJITLocker&, ScopeOffset);
    
    GlobalVariableID uniqueIDForVariable(const ConcurrentJITLocker&, UniquedStringImpl* key, VM&);
    GlobalVariableID uniqueIDForOffset(const ConcurrentJITLocker&, VarOffset, VM&);
    RefPtr<TypeSet> globalTypeSetForOffset(const ConcurrentJITLocker&, VarOffset, VM&);
    RefPtr<TypeSet> globalTypeSetForVariable(const ConcurrentJITLocker&, UniquedStringImpl* key, VM&);
    
    bool usesNonStrictEval() { return m_usesNonStrictEval; }
    void setUsesNonStrictEval(bool usesNonStrictEval) { m_usesNonStrictEval = usesNonStrictEval; }
    
    SymbolTable* cloneScopePart(VM&);
    
    void prepareForTypeProfiling(const ConcurrentJITLocker&);
    
    InferredValue* singletonScope() { return m_singletonScope.get(); }
    
    static void visitChildren(JSCell*, SlotVisitor&);
    
    DECLARE_EXPORT_INFO;
    
private:
    JS_EXPORT_PRIVATE SymbolTable(VM&);
    ~SymbolTable();
    
    JS_EXPORT_PRIVATE void finishCreation(VM&);
    
    Map m_map;
    ScopeOffset m_maxScopeOffset;
    
    struct TypeProfilingRareData {
        UniqueIDMap m_uniqueIDMap;
        OffsetToVariableMap m_offsetToVariableMap;
        UniqueTypeSetMap m_uniqueTypeSetMap;
    };
    std::unique_ptr<TypeProfilingRareData> m_typeProfilingRareData;
    
    bool m_usesNonStrictEval;
    
    WriteBarrier<ScopedArgumentsTable> m_arguments;
    WriteBarrier<InferredValue> m_singletonScope;
    
    std::unique_ptr<LocalToEntryVec> m_localToEntry;
    
public:
    mutable ConcurrentJITLock m_lock;
};

} // namespace JSC

#endif // SymbolTable_h