/*
 * Copyright (C) 2007, 2008, 2012-2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef SymbolTable_h
#define SymbolTable_h

#include "ConcurrentJITLock.h"
#include "JSObject.h"
#include "VariableWatchpointSet.h"
#include <memory>
#include <wtf/HashTraits.h>
#include <wtf/text/StringImpl.h>

namespace JSC {

struct SlowArgument {
public:
    enum Status {
        Normal = 0,
        Captured = 1,
        Deleted = 2
    };

    SlowArgument()
        : status(Normal)
        , index(0)
    {
    }

    Status status;
    int index; // If status is 'Deleted', index is bogus.
};

static ALWAYS_INLINE int missingSymbolMarker() { return std::numeric_limits<int>::max(); }

// The bit twiddling in this class assumes that every register index is a
// reasonably small positive or negative number, and therefore has its high
// four bits all set or all unset.

// In addition to implementing semantics-mandated variable attributes and
// implementation-mandated variable indexing, this class also implements
// watchpoints to be used for JIT optimizations. Because watchpoints are
// meant to be relatively rare, this class optimizes heavily for the case
// that they are not being used. To that end, this class uses the thin-fat
// idiom: either it is thin, in which case it contains an in-place encoded
// word that consists of attributes, the index, and a bit saying that it is
// thin; or it is fat, in which case it contains a pointer to a malloc'd
// data structure and a bit saying that it is fat. The malloc'd data
// structure will be malloc'd a second time upon copy, to preserve the
// property that in-place edits to SymbolTableEntry do not manifest in any
// copies. However, the malloc'd FatEntry data structure contains a ref-
// counted pointer to a shared WatchpointSet. Thus, in-place edits of the
// WatchpointSet will manifest in all copies. Here's a picture:
//
// SymbolTableEntry --> FatEntry --> VariableWatchpointSet
//
// If you make a copy of a SymbolTableEntry, you will have:
//
// original: SymbolTableEntry --> FatEntry --> VariableWatchpointSet
// copy:     SymbolTableEntry --> FatEntry -----^

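// A minimal sketch of that copy behavior (illustrative only; `table` is an
// assumed SymbolTable*):
//
//     SymbolTableEntry original(-7);    // thin: one encoded word, no malloc
//     original.prepareToWatch(table);   // inflates `original` to a FatEntry
//     SymbolTableEntry copy = original; // copySlow() mallocs a second FatEntry,
//                                       // but both FatEntries share the same
//                                       // ref-counted VariableWatchpointSet.
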
struct SymbolTableEntry {
    // Use the SymbolTableEntry::Fast class, either via implicit cast or by calling
    // getFast(), when you (1) only care about isNull(), getIndex(), and isReadOnly(),
    // and (2) you are in a hot path where you need to minimize the number of times
    // that you branch on isFat() when getting the bits().
    class Fast {
    public:
        Fast()
            : m_bits(SlimFlag)
        {
        }

        ALWAYS_INLINE Fast(const SymbolTableEntry& entry)
            : m_bits(entry.bits())
        {
        }

        bool isNull() const
        {
            return !(m_bits & ~SlimFlag);
        }

        int getIndex() const
        {
            return static_cast<int>(m_bits >> FlagBits);
        }

        bool isReadOnly() const
        {
            return m_bits & ReadOnlyFlag;
        }

        unsigned getAttributes() const
        {
            unsigned attributes = 0;
            if (m_bits & ReadOnlyFlag)
                attributes |= ReadOnly;
            if (m_bits & DontEnumFlag)
                attributes |= DontEnum;
            return attributes;
        }

        bool isFat() const
        {
            return !(m_bits & SlimFlag);
        }

    private:
        friend struct SymbolTableEntry;
        intptr_t m_bits;
    };

    SymbolTableEntry()
        : m_bits(SlimFlag)
    {
    }

    SymbolTableEntry(int index)
        : m_bits(SlimFlag)
    {
        ASSERT(isValidIndex(index));
        pack(index, false, false);
    }

    SymbolTableEntry(int index, unsigned attributes)
        : m_bits(SlimFlag)
    {
        ASSERT(isValidIndex(index));
        pack(index, attributes & ReadOnly, attributes & DontEnum);
    }

    ~SymbolTableEntry()
    {
        freeFatEntry();
    }

    SymbolTableEntry(const SymbolTableEntry& other)
        : m_bits(SlimFlag)
    {
        *this = other;
    }

    SymbolTableEntry& operator=(const SymbolTableEntry& other)
    {
        if (UNLIKELY(other.isFat()))
            return copySlow(other);
        freeFatEntry();
        m_bits = other.m_bits;
        return *this;
    }

    bool isNull() const
    {
        return !(bits() & ~SlimFlag);
    }

    int getIndex() const
    {
        return static_cast<int>(bits() >> FlagBits);
    }

    ALWAYS_INLINE Fast getFast() const
    {
        return Fast(*this);
    }

    ALWAYS_INLINE Fast getFast(bool& wasFat) const
    {
        Fast result;
        wasFat = isFat();
        if (wasFat)
            result.m_bits = fatEntry()->m_bits | SlimFlag;
        else
            result.m_bits = m_bits;
        return result;
    }

    unsigned getAttributes() const
    {
        return getFast().getAttributes();
    }

    void setAttributes(unsigned attributes)
    {
        pack(getIndex(), attributes & ReadOnly, attributes & DontEnum);
    }

    bool isReadOnly() const
    {
        return bits() & ReadOnlyFlag;
    }

    JSValue inferredValue();

    void prepareToWatch(SymbolTable*);

    void addWatchpoint(Watchpoint*);

    VariableWatchpointSet* watchpointSet()
    {
        if (!isFat())
            return 0;
        return fatEntry()->m_watchpoints.get();
    }

    ALWAYS_INLINE void notifyWrite(VM& vm, JSValue value)
    {
        if (LIKELY(!isFat()))
            return;
        notifyWriteSlow(vm, value);
    }

private:
    static const intptr_t SlimFlag = 0x1;
    static const intptr_t ReadOnlyFlag = 0x2;
    static const intptr_t DontEnumFlag = 0x4;
    static const intptr_t NotNullFlag = 0x8;
    static const intptr_t FlagBits = 4;

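    // Layout of a thin entry's m_bits, for illustration (assumed example
    // values): pack(-7, true, false) produces
    //
    //     (-7 << FlagBits) | NotNullFlag | ReadOnlyFlag | SlimFlag
    //         == -112 | 8 | 2 | 1 == -101
    //
    // i.e. the register index lives in the high bits and the four flags in
    // the low nibble, which is why getIndex() is just a signed right shift.
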
    class FatEntry {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        FatEntry(intptr_t bits)
            : m_bits(bits & ~SlimFlag)
        {
        }

        intptr_t m_bits; // always has SlimFlag clear, and otherwise exactly matches what the bits would have been if this entry weren't fat.

        RefPtr<VariableWatchpointSet> m_watchpoints;
    };

    SymbolTableEntry& copySlow(const SymbolTableEntry&);
    JS_EXPORT_PRIVATE void notifyWriteSlow(VM&, JSValue);

    bool isFat() const
    {
        return !(m_bits & SlimFlag);
    }

    const FatEntry* fatEntry() const
    {
        ASSERT(isFat());
        return bitwise_cast<const FatEntry*>(m_bits);
    }

    FatEntry* fatEntry()
    {
        ASSERT(isFat());
        return bitwise_cast<FatEntry*>(m_bits);
    }

    FatEntry* inflate()
    {
        if (LIKELY(isFat()))
            return fatEntry();
        return inflateSlow();
    }

    FatEntry* inflateSlow();

    ALWAYS_INLINE intptr_t bits() const
    {
        if (isFat())
            return fatEntry()->m_bits;
        return m_bits;
    }

    ALWAYS_INLINE intptr_t& bits()
    {
        if (isFat())
            return fatEntry()->m_bits;
        return m_bits;
    }

    void freeFatEntry()
    {
        if (LIKELY(!isFat()))
            return;
        freeFatEntrySlow();
    }

    JS_EXPORT_PRIVATE void freeFatEntrySlow();

    void pack(int index, bool readOnly, bool dontEnum)
    {
        ASSERT(!isFat());
        intptr_t& bitsRef = bits();
        bitsRef = (static_cast<intptr_t>(index) << FlagBits) | NotNullFlag | SlimFlag;
        if (readOnly)
            bitsRef |= ReadOnlyFlag;
        if (dontEnum)
            bitsRef |= DontEnumFlag;
    }

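    // Checks that `index` survives pack()'s shift: shifting left by FlagBits
    // and back must be lossless, which holds exactly when the high bits of
    // the index are all set or all unset (see the comment atop this class).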
    bool isValidIndex(int index)
    {
        return ((static_cast<intptr_t>(index) << FlagBits) >> FlagBits) == static_cast<intptr_t>(index);
    }

    intptr_t m_bits;
};

struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> {
    static const bool needsDestruction = true;
};

class SymbolTable : public JSCell {
public:
    typedef JSCell Base;

    typedef HashMap<RefPtr<StringImpl>, SymbolTableEntry, IdentifierRepHash, HashTraits<RefPtr<StringImpl>>, SymbolTableIndexHashTraits> Map;

    static SymbolTable* create(VM& vm)
    {
        SymbolTable* symbolTable = new (NotNull, allocateCell<SymbolTable>(vm.heap)) SymbolTable(vm);
        symbolTable->finishCreation(vm);
        return symbolTable;
    }
    static const bool needsDestruction = true;
    static const bool hasImmortalStructure = true;
    static void destroy(JSCell*);

    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
    {
        return Structure::create(vm, globalObject, prototype, TypeInfo(LeafType, StructureFlags), info());
    }

    // You must hold the lock until after you're done with the iterator.
    Map::iterator find(const ConcurrentJITLocker&, StringImpl* key)
    {
        return m_map.find(key);
    }

    Map::iterator find(const GCSafeConcurrentJITLocker&, StringImpl* key)
    {
        return m_map.find(key);
    }

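    // A minimal sketch of that locking discipline (illustrative; `table`, `key`,
    // and `doSomethingWith` are assumed placeholders):
    //
    //     ConcurrentJITLocker locker(table->m_lock);
    //     SymbolTable::Map::iterator iter = table->find(locker, key);
    //     if (iter != table->end(locker))
    //         doSomethingWith(iter->value); // safe: locker is still held
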
    SymbolTableEntry get(const ConcurrentJITLocker&, StringImpl* key)
    {
        return m_map.get(key);
    }

    SymbolTableEntry get(StringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return get(locker, key);
    }

    SymbolTableEntry inlineGet(const ConcurrentJITLocker&, StringImpl* key)
    {
        return m_map.inlineGet(key);
    }

    SymbolTableEntry inlineGet(StringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return inlineGet(locker, key);
    }

    Map::iterator begin(const ConcurrentJITLocker&)
    {
        return m_map.begin();
    }

    Map::iterator end(const ConcurrentJITLocker&)
    {
        return m_map.end();
    }

    Map::iterator end(const GCSafeConcurrentJITLocker&)
    {
        return m_map.end();
    }

    size_t size(const ConcurrentJITLocker&) const
    {
        return m_map.size();
    }

    size_t size() const
    {
        ConcurrentJITLocker locker(m_lock);
        return size(locker);
    }

    Map::AddResult add(const ConcurrentJITLocker&, StringImpl* key, const SymbolTableEntry& entry)
    {
        return m_map.add(key, entry);
    }

    void add(StringImpl* key, const SymbolTableEntry& entry)
    {
        ConcurrentJITLocker locker(m_lock);
        add(locker, key, entry);
    }

    Map::AddResult set(const ConcurrentJITLocker&, StringImpl* key, const SymbolTableEntry& entry)
    {
        return m_map.set(key, entry);
    }

    void set(StringImpl* key, const SymbolTableEntry& entry)
    {
        ConcurrentJITLocker locker(m_lock);
        set(locker, key, entry);
    }

    bool contains(const ConcurrentJITLocker&, StringImpl* key)
    {
        return m_map.contains(key);
    }

    bool contains(StringImpl* key)
    {
        ConcurrentJITLocker locker(m_lock);
        return contains(locker, key);
    }

    bool usesNonStrictEval() { return m_usesNonStrictEval; }
    void setUsesNonStrictEval(bool usesNonStrictEval) { m_usesNonStrictEval = usesNonStrictEval; }

    int captureStart() const { return m_captureStart; }
    void setCaptureStart(int captureStart) { m_captureStart = captureStart; }

    int captureEnd() const { return m_captureEnd; }
    void setCaptureEnd(int captureEnd) { m_captureEnd = captureEnd; }

    // Captured registers grow downward, so captureStart() is above captureEnd().
    int captureCount() const { return m_captureStart - m_captureEnd; }

    bool isCaptured(int operand)
    {
        return operand <= captureStart() && operand > captureEnd();
    }
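
    // Example with assumed values: if captureStart() == -8 and captureEnd() == -12,
    // then operands -8, -9, -10, -11 are captured and captureCount() == 4.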

    int parameterCount() { return m_parameterCountIncludingThis - 1; }
    int parameterCountIncludingThis() { return m_parameterCountIncludingThis; }
    void setParameterCountIncludingThis(int parameterCountIncludingThis) { m_parameterCountIncludingThis = parameterCountIncludingThis; }

    // 0 if we don't capture any arguments; parameterCount() in length if we do.
    const SlowArgument* slowArguments() { return m_slowArguments.get(); }
    void setSlowArguments(std::unique_ptr<SlowArgument[]> slowArguments) { m_slowArguments = WTF::move(slowArguments); }

    SymbolTable* cloneCapturedNames(VM&);

    static void visitChildren(JSCell*, SlotVisitor&);

    DECLARE_EXPORT_INFO;

protected:
    static const unsigned StructureFlags = StructureIsImmortal | Base::StructureFlags;

private:
    class WatchpointCleanup : public UnconditionalFinalizer {
    public:
        WatchpointCleanup(SymbolTable*);
        virtual ~WatchpointCleanup();

    protected:
        virtual void finalizeUnconditionally() override;

    private:
        SymbolTable* m_symbolTable;
    };

    JS_EXPORT_PRIVATE SymbolTable(VM&);
    ~SymbolTable();

    Map m_map;

    int m_parameterCountIncludingThis;
    bool m_usesNonStrictEval;

    int m_captureStart;
    int m_captureEnd;

    std::unique_ptr<SlowArgument[]> m_slowArguments;

    std::unique_ptr<WatchpointCleanup> m_watchpointCleanup;

public:
    InlineWatchpointSet m_functionEnteredOnce;

    mutable ConcurrentJITLock m_lock;
};

} // namespace JSC

#endif // SymbolTable_h