1 /*
2 * Copyright (C) 2008-2015 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #ifndef CodeBlock_h
31 #define CodeBlock_h
32
33 #include "ArrayProfile.h"
34 #include "ByValInfo.h"
35 #include "BytecodeConventions.h"
36 #include "BytecodeLivenessAnalysis.h"
37 #include "CallLinkInfo.h"
38 #include "CallReturnOffsetToBytecodeOffset.h"
39 #include "CodeBlockHash.h"
40 #include "CodeBlockSet.h"
41 #include "ConcurrentJITLock.h"
42 #include "CodeOrigin.h"
43 #include "CodeType.h"
44 #include "CompactJITCodeMap.h"
45 #include "DFGCommon.h"
46 #include "DFGCommonData.h"
47 #include "DFGExitProfile.h"
48 #include "DeferredCompilationCallback.h"
49 #include "EvalCodeCache.h"
50 #include "ExecutionCounter.h"
51 #include "ExpressionRangeInfo.h"
52 #include "HandlerInfo.h"
53 #include "ObjectAllocationProfile.h"
54 #include "Options.h"
55 #include "PutPropertySlot.h"
56 #include "Instruction.h"
57 #include "JITCode.h"
58 #include "JITWriteBarrier.h"
59 #include "JSGlobalObject.h"
60 #include "JumpTable.h"
61 #include "LLIntCallLinkInfo.h"
62 #include "LazyOperandValueProfile.h"
63 #include "ProfilerCompilation.h"
64 #include "ProfilerJettisonReason.h"
65 #include "RegExpObject.h"
66 #include "StructureStubInfo.h"
67 #include "UnconditionalFinalizer.h"
68 #include "ValueProfile.h"
69 #include "VirtualRegister.h"
70 #include "Watchpoint.h"
71 #include <wtf/Bag.h>
72 #include <wtf/FastMalloc.h>
73 #include <wtf/RefCountedArray.h>
74 #include <wtf/RefPtr.h>
75 #include <wtf/SegmentedVector.h>
76 #include <wtf/Vector.h>
77 #include <wtf/text/WTFString.h>
78
79 namespace JSC {
80
81 class ExecState;
82 class LLIntOffsetsExtractor;
83 class RepatchBuffer;
84 class TypeLocation;
85
86 enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
87
88 class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
89 WTF_MAKE_FAST_ALLOCATED;
90 friend class BytecodeLivenessAnalysis;
91 friend class JIT;
92 friend class LLIntOffsetsExtractor;
93 public:
94 enum CopyParsedBlockTag { CopyParsedBlock };
95 protected:
96 CodeBlock(CopyParsedBlockTag, CodeBlock& other);
97
98 CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
99
100 WriteBarrier<JSGlobalObject> m_globalObject;
101 Heap* m_heap;
102
103 public:
104 JS_EXPORT_PRIVATE virtual ~CodeBlock();
105
106 UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
107
108 CString inferredName() const;
109 CodeBlockHash hash() const;
110 bool hasHash() const;
111 bool isSafeToComputeHash() const;
112 CString hashAsStringIfPossible() const;
113 CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
114 CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
115 void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
116 void dump(PrintStream&) const;
117
118 int numParameters() const { return m_numParameters; }
119 void setNumParameters(int newValue);
120
121 int* addressOfNumParameters() { return &m_numParameters; }
122 static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
123
124 CodeBlock* alternative() { return m_alternative.get(); }
125 PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
126 void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
127
128 template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
129 {
130 Functor f(std::forward<Functor>(functor));
131 Vector<CodeBlock*, 4> codeBlocks;
132 codeBlocks.append(this);
133
134 while (!codeBlocks.isEmpty()) {
135 CodeBlock* currentCodeBlock = codeBlocks.takeLast();
136 f(currentCodeBlock);
137
138 if (CodeBlock* alternative = currentCodeBlock->alternative())
139 codeBlocks.append(alternative);
140 if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
141 codeBlocks.append(osrEntryBlock);
142 }
143 }
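// Usage sketch (illustrative only, not part of the interface): any functor or
// lambda taking a CodeBlock* can be passed, e.g. to dump the bytecode of this
// block and of every alternative / OSR entry block reachable from it:
//
//     codeBlock->forEachRelatedCodeBlock([](CodeBlock* block) {
//         block->dumpBytecode();
//     });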
144
145 CodeSpecializationKind specializationKind() const
146 {
147 return specializationFromIsConstruct(m_isConstructor);
148 }
149
150 CodeBlock* baselineAlternative();
151
152 // FIXME: Get rid of this.
153 // https://bugs.webkit.org/show_bug.cgi?id=123677
154 CodeBlock* baselineVersion();
155
156 void visitAggregate(SlotVisitor&);
157
158 void dumpSource();
159 void dumpSource(PrintStream&);
160
161 void dumpBytecode();
162 void dumpBytecode(PrintStream&);
163 void dumpBytecode(
164 PrintStream&, unsigned bytecodeOffset,
165 const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
166 void printStructures(PrintStream&, const Instruction*);
167 void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
168
169 bool isStrictMode() const { return m_isStrictMode; }
170 ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
171
172 inline bool isKnownNotImmediate(int index)
173 {
174 if (index == m_thisRegister.offset() && !m_isStrictMode)
175 return true;
176
177 if (isConstantRegisterIndex(index))
178 return getConstant(index).isCell();
179
180 return false;
181 }
182
183 ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
184 {
185 return index >= m_numVars;
186 }
187
188 enum class RequiredHandler {
189 CatchHandler,
190 AnyHandler
191 };
192 HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
193 unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
194 unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
195 void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
196 int& startOffset, int& endOffset, unsigned& line, unsigned& column);
197
198 void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
199 void getStubInfoMap(StubInfoMap& result);
200
201 void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
202 void getCallLinkInfoMap(CallLinkInfoMap& result);
203
204 #if ENABLE(JIT)
205 StructureStubInfo* addStubInfo();
206 Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
207 Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }
208
209 // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
210 // stub info.
211 StructureStubInfo* findStubInfo(CodeOrigin);
212
213 void resetStub(StructureStubInfo&);
214
215 ByValInfo& getByValInfo(unsigned bytecodeIndex)
216 {
217 return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
218 }
219
220 CallLinkInfo* addCallLinkInfo();
221 Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
222 Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }
223
224     // This is a slow function call used primarily for compiling OSR exits in cases
225     // where there has been inlining. Chances are that if you want to use this, you're
226     // really looking for a CallLinkInfoMap to amortize the cost of calling this.
227 CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
228 #endif // ENABLE(JIT)
229
230 void unlinkIncomingCalls();
231
232 #if ENABLE(JIT)
233 void unlinkCalls();
234
235 void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
236 void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
237 #endif // ENABLE(JIT)
238
239 void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
240
241 void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap)
242 {
243 m_jitCodeMap = WTF::move(jitCodeMap);
244 }
245 CompactJITCodeMap* jitCodeMap()
246 {
247 return m_jitCodeMap.get();
248 }
249
250 unsigned bytecodeOffset(Instruction* returnAddress)
251 {
252 RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
253 return static_cast<Instruction*>(returnAddress) - instructions().begin();
254 }
255
256 unsigned numberOfInstructions() const { return m_instructions.size(); }
257 RefCountedArray<Instruction>& instructions() { return m_instructions; }
258 const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
259
260 size_t predictedMachineCodeSize();
261
262 bool usesOpcode(OpcodeID);
263
264 unsigned instructionCount() const { return m_instructions.size(); }
265
266 // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
267 void install();
268
269 // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
270 PassRefPtr<CodeBlock> newReplacement();
271
272 void setJITCode(PassRefPtr<JITCode> code)
273 {
274 ASSERT(m_heap->isDeferred());
275 m_heap->reportExtraMemoryAllocated(code->size());
276 ConcurrentJITLocker locker(m_lock);
277 WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
278 m_jitCode = code;
279 }
280 PassRefPtr<JITCode> jitCode() { return m_jitCode; }
281 JITCode::JITType jitType() const
282 {
283 JITCode* jitCode = m_jitCode.get();
284 WTF::loadLoadFence();
285 JITCode::JITType result = JITCode::jitTypeFor(jitCode);
286 WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
287 return result;
288 }
289
290 bool hasBaselineJITProfiling() const
291 {
292 return jitType() == JITCode::BaselineJIT;
293 }
294
295 #if ENABLE(JIT)
296 virtual CodeBlock* replacement() = 0;
297
298 virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
299 DFG::CapabilityLevel capabilityLevel();
300 DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
301
302 bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
303 bool hasOptimizedReplacement(); // the typeToReplace is my JITType
304 #endif
305
306 void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);
307
308 ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
309
310 void setVM(VM* vm) { m_vm = vm; }
311 VM* vm() { return m_vm; }
312
313 void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
314 VirtualRegister thisRegister() const { return m_thisRegister; }
315
316 bool usesEval() const { return m_unlinkedCode->usesEval(); }
317
318 void setScopeRegister(VirtualRegister scopeRegister)
319 {
320 ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
321 m_scopeRegister = scopeRegister;
322 }
323
324 VirtualRegister scopeRegister() const
325 {
326 return m_scopeRegister;
327 }
328
329 void setActivationRegister(VirtualRegister activationRegister)
330 {
331 m_lexicalEnvironmentRegister = activationRegister;
332 }
333
334 VirtualRegister activationRegister() const
335 {
336 ASSERT(m_lexicalEnvironmentRegister.isValid());
337 return m_lexicalEnvironmentRegister;
338 }
339
340 VirtualRegister uncheckedActivationRegister()
341 {
342 return m_lexicalEnvironmentRegister;
343 }
344
345 bool needsActivation() const
346 {
347 ASSERT(m_lexicalEnvironmentRegister.isValid() == m_needsActivation);
348 return m_needsActivation;
349 }
350
351 CodeType codeType() const { return m_unlinkedCode->codeType(); }
352 PutPropertySlot::Context putByIdContext() const
353 {
354 if (codeType() == EvalCode)
355 return PutPropertySlot::PutByIdEval;
356 return PutPropertySlot::PutById;
357 }
358
359 SourceProvider* source() const { return m_source.get(); }
360 unsigned sourceOffset() const { return m_sourceOffset; }
361 unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }
362
363 size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
364 unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
365
366 void clearEvalCache();
367
368 String nameForRegister(VirtualRegister);
369
370 #if ENABLE(JIT)
371 void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
372 size_t numberOfByValInfos() const { return m_byValInfos.size(); }
373 ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
374 #endif
375
376 unsigned numberOfArgumentValueProfiles()
377 {
378 ASSERT(m_numParameters >= 0);
379 ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
380 return m_argumentValueProfiles.size();
381 }
382 ValueProfile* valueProfileForArgument(unsigned argumentIndex)
383 {
384 ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
385 ASSERT(result->m_bytecodeOffset == -1);
386 return result;
387 }
388
389 unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
390 ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
391 ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset);
392 SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
393 {
394 return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
395 }
396
397 unsigned totalNumberOfValueProfiles()
398 {
399 return numberOfArgumentValueProfiles() + numberOfValueProfiles();
400 }
401 ValueProfile* getFromAllValueProfiles(unsigned index)
402 {
403 if (index < numberOfArgumentValueProfiles())
404 return valueProfileForArgument(index);
405 return valueProfile(index - numberOfArgumentValueProfiles());
406 }
407
408 RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
409 {
410 m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
411 return &m_rareCaseProfiles.last();
412 }
413 unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
414 RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
415 RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
416
417 bool likelyToTakeSlowCase(int bytecodeOffset)
418 {
419 if (!hasBaselineJITProfiling())
420 return false;
421 unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
422 return value >= Options::likelyToTakeSlowCaseMinimumCount();
423 }
424
425 bool couldTakeSlowCase(int bytecodeOffset)
426 {
427 if (!hasBaselineJITProfiling())
428 return false;
429 unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
430 return value >= Options::couldTakeSlowCaseMinimumCount();
431 }
432
433 RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
434 {
435 m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
436 return &m_specialFastCaseProfiles.last();
437 }
438 unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
439 RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
440 RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
441 {
442 return tryBinarySearch<RareCaseProfile, int>(
443 m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
444 getRareCaseProfileBytecodeOffset);
445 }
446
447 bool likelyToTakeSpecialFastCase(int bytecodeOffset)
448 {
449 if (!hasBaselineJITProfiling())
450 return false;
451 unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
452 return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
453 }
454
455 bool couldTakeSpecialFastCase(int bytecodeOffset)
456 {
457 if (!hasBaselineJITProfiling())
458 return false;
459 unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
460 return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
461 }
462
463 bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
464 {
465 if (!hasBaselineJITProfiling())
466 return false;
467 unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
468 unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
469 unsigned value = slowCaseCount - specialFastCaseCount;
470 return value >= Options::likelyToTakeSlowCaseMinimumCount();
471 }
472
473 bool likelyToTakeAnySlowCase(int bytecodeOffset)
474 {
475 if (!hasBaselineJITProfiling())
476 return false;
477 unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
478 unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
479 unsigned value = slowCaseCount + specialFastCaseCount;
480 return value >= Options::likelyToTakeSlowCaseMinimumCount();
481 }
482
483 unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
484 const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
485 ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
486 {
487 m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
488 return &m_arrayProfiles.last();
489 }
490 ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
491 ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
492
493 // Exception handling support
494
495 size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
496 HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
497
498 bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
499
500 #if ENABLE(DFG_JIT)
501 Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
502 {
503 return m_jitCode->dfgCommon()->codeOrigins;
504 }
505
506 // Having code origins implies that there has been some inlining.
507 bool hasCodeOrigins()
508 {
509 return JITCode::isOptimizingJIT(jitType());
510 }
511
512 bool canGetCodeOrigin(unsigned index)
513 {
514 if (!hasCodeOrigins())
515 return false;
516 return index < codeOrigins().size();
517 }
518
519 CodeOrigin codeOrigin(unsigned index)
520 {
521 return codeOrigins()[index];
522 }
523
524 bool addFrequentExitSite(const DFG::FrequentExitSite& site)
525 {
526 ASSERT(JITCode::isBaselineCode(jitType()));
527 ConcurrentJITLocker locker(m_lock);
528 return m_exitProfile.add(locker, site);
529 }
530
531 bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
532 {
533 return m_exitProfile.hasExitSite(locker, site);
534 }
535 bool hasExitSite(const DFG::FrequentExitSite& site) const
536 {
537 ConcurrentJITLocker locker(m_lock);
538 return hasExitSite(locker, site);
539 }
540
541 DFG::ExitProfile& exitProfile() { return m_exitProfile; }
542
543 CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
544 {
545 return m_lazyOperandValueProfiles;
546 }
547 #endif // ENABLE(DFG_JIT)
548
549 // Constant Pool
550 #if ENABLE(DFG_JIT)
551 size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
552 size_t numberOfDFGIdentifiers() const
553 {
554 if (!JITCode::isOptimizingJIT(jitType()))
555 return 0;
556
557 return m_jitCode->dfgCommon()->dfgIdentifiers.size();
558 }
559
560 const Identifier& identifier(int index) const
561 {
562 size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
563 if (static_cast<unsigned>(index) < unlinkedIdentifiers)
564 return m_unlinkedCode->identifier(index);
565 ASSERT(JITCode::isOptimizingJIT(jitType()));
566 return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
567 }
568 #else
569 size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
570 const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
571 #endif
572
573 Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
574 Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
575 unsigned addConstant(JSValue v)
576 {
577 unsigned result = m_constantRegisters.size();
578 m_constantRegisters.append(WriteBarrier<Unknown>());
579 m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
580 m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
581 return result;
582 }
583
584 unsigned addConstantLazily()
585 {
586 unsigned result = m_constantRegisters.size();
587 m_constantRegisters.append(WriteBarrier<Unknown>());
588 m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
589 return result;
590 }
591
592 WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
593 ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
594 ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
595 ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }
596
597 FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
598 int numberOfFunctionDecls() { return m_functionDecls.size(); }
599 FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
600
601 RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
602
603 unsigned numberOfConstantBuffers() const
604 {
605 if (!m_rareData)
606 return 0;
607 return m_rareData->m_constantBuffers.size();
608 }
609 unsigned addConstantBuffer(const Vector<JSValue>& buffer)
610 {
611 createRareDataIfNecessary();
612 unsigned size = m_rareData->m_constantBuffers.size();
613 m_rareData->m_constantBuffers.append(buffer);
614 return size;
615 }
616
617 Vector<JSValue>& constantBufferAsVector(unsigned index)
618 {
619 ASSERT(m_rareData);
620 return m_rareData->m_constantBuffers[index];
621 }
622 JSValue* constantBuffer(unsigned index)
623 {
624 return constantBufferAsVector(index).data();
625 }
626
627 Heap* heap() const { return m_heap; }
628 JSGlobalObject* globalObject() { return m_globalObject.get(); }
629
630 JSGlobalObject* globalObjectFor(CodeOrigin);
631
632 BytecodeLivenessAnalysis& livenessAnalysis()
633 {
634 {
635 ConcurrentJITLocker locker(m_lock);
636 if (!!m_livenessAnalysis)
637 return *m_livenessAnalysis;
638 }
639 std::unique_ptr<BytecodeLivenessAnalysis> analysis =
640 std::make_unique<BytecodeLivenessAnalysis>(this);
641 {
642 ConcurrentJITLocker locker(m_lock);
643 if (!m_livenessAnalysis)
644 m_livenessAnalysis = WTF::move(analysis);
645 return *m_livenessAnalysis;
646 }
647 }
648
649 void validate();
650
651 // Jump Tables
652
653 size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
654 SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
655 SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
656 void clearSwitchJumpTables()
657 {
658 if (!m_rareData)
659 return;
660 m_rareData->m_switchJumpTables.clear();
661 }
662
663 size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
664 StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
665 StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
666
667
668 SymbolTable* symbolTable() const { return m_symbolTable.get(); }
669
670 EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
671
672 enum ShrinkMode {
673 // Shrink prior to generating machine code that may point directly into vectors.
674 EarlyShrink,
675
676 // Shrink after generating machine code, and after possibly creating new vectors
677 // and appending to others. At this time it is not safe to shrink certain vectors
678 // because we would have generated machine code that references them directly.
679 LateShrink
680 };
681 void shrinkToFit(ShrinkMode);
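// Illustrative sketch of the intended call sites (an assumption drawn from the
// comments above, not a mandate): shrink eagerly while no machine code points
// into the vectors, and use the late mode once emitted code may reference them:
//
//     codeBlock->shrinkToFit(CodeBlock::EarlyShrink); // before machine code is generated
//     // ... generate machine code that may point into the vectors ...
//     codeBlock->shrinkToFit(CodeBlock::LateShrink);  // only shrinks what is still safe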
682
683 // Functions for controlling when JITting kicks in, in a mixed mode
684 // execution world.
685
686 bool checkIfJITThresholdReached()
687 {
688 return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
689 }
690
691 void dontJITAnytimeSoon()
692 {
693 m_llintExecuteCounter.deferIndefinitely();
694 }
695
696 void jitAfterWarmUp()
697 {
698 m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
699 }
700
701 void jitSoon()
702 {
703 m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
704 }
705
706 const BaselineExecutionCounter& llintExecuteCounter() const
707 {
708 return m_llintExecuteCounter;
709 }
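// Sketch of how an interpreter tier might drive these counters; the real call
// sites live in the LLInt slow paths, and compileWithBaselineJIT() below is a
// hypothetical stand-in for the actual JIT entry point:
//
//     if (codeBlock->checkIfJITThresholdReached())
//         compileWithBaselineJIT(codeBlock);   // hypothetical helper
//     // If compilation cannot happen yet, back off before retrying:
//     //     codeBlock->jitAfterWarmUp();  or  codeBlock->dontJITAnytimeSoon();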
710
711 // Functions for controlling when tiered compilation kicks in. This
712 // controls both when the optimizing compiler is invoked and when OSR
713 // entry happens. Two triggers exist: the loop trigger and the return
714 // trigger. In either case, when an addition to m_jitExecuteCounter
715 // causes it to become non-negative, the optimizing compiler is
716 // invoked. This includes a fast check to see if this CodeBlock has
717 // already been optimized (i.e. replacement() returns a CodeBlock
718 // that was optimized with a higher tier JIT than this one). In the
719 // case of the loop trigger, if the optimized compilation succeeds
720 // (or has already succeeded in the past) then OSR is attempted to
721 // redirect program flow into the optimized code.
722
723 // These functions are called from within the optimization triggers,
724 // and are used as a single point at which we define the heuristics
725 // for how much warm-up is mandated before the next optimization
726     // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
727 // as this is called from the CodeBlock constructor.
728
729 // When we observe a lot of speculation failures, we trigger a
730 // reoptimization. But each time, we increase the optimization trigger
731 // to avoid thrashing.
732 JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
733 void countReoptimization();
734 #if ENABLE(JIT)
735 unsigned numberOfDFGCompiles();
736
737 int32_t codeTypeThresholdMultiplier() const;
738
739 int32_t adjustedCounterValue(int32_t desiredThreshold);
740
741 int32_t* addressOfJITExecuteCounter()
742 {
743 return &m_jitExecuteCounter.m_counter;
744 }
745
746 static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
747 static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
748 static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }
749
750 const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }
751
752 unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
753
754 // Check if the optimization threshold has been reached, and if not,
755 // adjust the heuristics accordingly. Returns true if the threshold has
756 // been reached.
757 bool checkIfOptimizationThresholdReached();
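// Shape of an optimization trigger, as a sketch; triggerDFGCompile() is a
// hypothetical stand-in for the real optimizing-compiler driver, which lives
// outside this class:
//
//     if (codeBlock->checkIfOptimizationThresholdReached()) {
//         CompilationResult result = triggerDFGCompile(codeBlock);
//         codeBlock->setOptimizationThresholdBasedOnCompilationResult(result);
//     }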
758
759 // Call this to force the next optimization trigger to fire. This is
760 // rarely wise, since optimization triggers are typically more
761 // expensive than executing baseline code.
762 void optimizeNextInvocation();
763
764 // Call this to prevent optimization from happening again. Note that
765 // optimization will still happen after roughly 2^29 invocations,
766 // so this is really meant to delay that as much as possible. This
767 // is called if optimization failed, and we expect it to fail in
768 // the future as well.
769 void dontOptimizeAnytimeSoon();
770
771 // Call this to reinitialize the counter to its starting state,
772 // forcing a warm-up to happen before the next optimization trigger
773 // fires. This is called in the CodeBlock constructor. It also
774 // makes sense to call this if an OSR exit occurred. Note that
775     // OSR exit code is generated code, so the value of the execute
776 // counter that this corresponds to is also available directly.
777 void optimizeAfterWarmUp();
778
779 // Call this to force an optimization trigger to fire only after
780 // a lot of warm-up.
781 void optimizeAfterLongWarmUp();
782
783 // Call this to cause an optimization trigger to fire soon, but
784 // not necessarily the next one. This makes sense if optimization
785     // succeeds. Successful optimization means that all calls are
786 // relinked to the optimized code, so this only affects call
787 // frames that are still executing this CodeBlock. The value here
788 // is tuned to strike a balance between the cost of OSR entry
789     // (which is too high to warrant making every loop back edge
790     // trigger OSR immediately) and the cost of executing baseline
791 // code (which is high enough that we don't necessarily want to
792 // have a full warm-up). The intuition for calling this instead of
793 // optimizeNextInvocation() is for the case of recursive functions
794 // with loops. Consider that there may be N call frames of some
795 // recursive function, for a reasonably large value of N. The top
796 // one triggers optimization, and then returns, and then all of
797 // the others return. We don't want optimization to be triggered on
798 // each return, as that would be superfluous. It only makes sense
799 // to trigger optimization if one of those functions becomes hot
800 // in the baseline code.
801 void optimizeSoon();
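// Sketch tying these counter controls together (assumed call sites, for
// illustration only, based on the comments above):
//
//     // After an OSR exit lands back in this baseline CodeBlock:
//     codeBlock->countOSRExit();
//     codeBlock->optimizeAfterWarmUp();   // force a fresh warm-up before retrying
//
//     // After an optimized replacement has been installed:
//     codeBlock->optimizeSoon();          // frames still running here re-trigger cheaply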
802
803 void forceOptimizationSlowPathConcurrently();
804
805 void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
806
807 uint32_t osrExitCounter() const { return m_osrExitCounter; }
808
809 void countOSRExit() { m_osrExitCounter++; }
810
811 uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
812
813 static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
814
815 uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
816 uint32_t exitCountThresholdForReoptimization();
817 uint32_t exitCountThresholdForReoptimizationFromLoop();
818 bool shouldReoptimizeNow();
819 bool shouldReoptimizeFromLoopNow();
820 #else // No JIT
821 void optimizeAfterWarmUp() { }
822 unsigned numberOfDFGCompiles() { return 0; }
823 #endif
824
825 bool shouldOptimizeNow();
826 void updateAllValueProfilePredictions();
827 void updateAllArrayPredictions();
828 void updateAllPredictions();
829
830 unsigned frameRegisterCount();
831 int stackPointerOffset();
832
833 bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
834
835 bool hasDebuggerRequests() const { return m_debuggerRequests; }
836 void* debuggerRequestsAddress() { return &m_debuggerRequests; }
837
838 void addBreakpoint(unsigned numBreakpoints);
839 void removeBreakpoint(unsigned numBreakpoints)
840 {
841 ASSERT(m_numBreakpoints >= numBreakpoints);
842 m_numBreakpoints -= numBreakpoints;
843 }
844
845 enum SteppingMode {
846 SteppingModeDisabled,
847 SteppingModeEnabled
848 };
849 void setSteppingMode(SteppingMode);
850
851 void clearDebuggerRequests()
852 {
853 m_steppingMode = SteppingModeDisabled;
854 m_numBreakpoints = 0;
855 }
856
857 // FIXME: Make these remaining members private.
858
859 int m_numCalleeRegisters;
860 int m_numVars;
861 bool m_isConstructor : 1;
862
863 // This is intentionally public; it's the responsibility of anyone doing any
864 // of the following to hold the lock:
865 //
866 // - Modifying any inline cache in this code block.
867 //
868     // - Querying any inline cache in this code block, from a thread other than
869 // the main thread.
870 //
871 // Additionally, it's only legal to modify the inline cache on the main
872 // thread. This means that the main thread can query the inline cache without
873 // locking. This is crucial since executing the inline cache is effectively
874 // "querying" it.
875 //
876 // Another exception to the rules is that the GC can do whatever it wants
877 // without holding any locks, because the GC is guaranteed to wait until any
878 // concurrent compilation threads finish what they're doing.
879 mutable ConcurrentJITLock m_lock;
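// Sketch of the discipline described above (illustrative only): a concurrent
// compiler thread takes the lock before reading inline-cache state, while the
// main thread may use the lock-free overloads:
//
//     StubInfoMap stubInfos;
//     {
//         ConcurrentJITLocker locker(codeBlock->m_lock);
//         codeBlock->getStubInfoMap(locker, stubInfos);
//     }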
880
881 bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
882 bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.
883
884 bool m_didFailFTLCompilation : 1;
885 bool m_hasBeenCompiledWithFTL : 1;
886
887     // Internal methods for use by validation code. They would be private if it weren't
888     // for the fact that we use them from anonymous namespaces.
889 void beginValidationDidFail();
890 NO_RETURN_DUE_TO_CRASH void endValidationDidFail();
891
892 bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live.
893
894 struct RareData {
895 WTF_MAKE_FAST_ALLOCATED;
896 public:
897 Vector<HandlerInfo> m_exceptionHandlers;
898
899 // Buffers used for large array literals
900 Vector<Vector<JSValue>> m_constantBuffers;
901
902 // Jump Tables
903 Vector<SimpleJumpTable> m_switchJumpTables;
904 Vector<StringJumpTable> m_stringSwitchJumpTables;
905
906 EvalCodeCache m_evalCodeCache;
907 };
908
909 protected:
910 virtual void visitWeakReferences(SlotVisitor&) override;
911 virtual void finalizeUnconditionally() override;
912
913 #if ENABLE(DFG_JIT)
914 void tallyFrequentExitSites();
915 #else
916 void tallyFrequentExitSites() { }
917 #endif
918
919 private:
920 friend class CodeBlockSet;
921
922 CodeBlock* specialOSREntryBlockOrNull();
923
924 void noticeIncomingCall(ExecState* callerFrame);
925
926 double optimizationThresholdScalingFactor();
927
928 void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
929
930 void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
931 {
932 ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
933 size_t count = constants.size();
934 m_constantRegisters.resizeToFit(count);
935 for (size_t i = 0; i < count; i++)
936 m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
937 m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
938 }
939
940 void dumpBytecode(
941 PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
942 const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
943
944 CString registerName(int r) const;
945 CString constantName(int index) const;
946 void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
947 void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
948 void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
949 void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
950 void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
951 enum CacheDumpMode { DumpCaches, DontDumpCaches };
952 void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
953 void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
954 void printPutByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
955 void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
956 void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);
957
958 void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
959 void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
960 void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
961 void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
962
963 bool shouldImmediatelyAssumeLivenessDuringScan();
964
965 void propagateTransitions(SlotVisitor&);
966 void determineLiveness(SlotVisitor&);
967
968 void stronglyVisitStrongReferences(SlotVisitor&);
969 void stronglyVisitWeakReferences(SlotVisitor&);
970
971 void createRareDataIfNecessary()
972 {
973 if (!m_rareData)
974 m_rareData = std::make_unique<RareData>();
975 }
976
977 void insertBasicBlockBoundariesForControlFlowProfiler(Vector<Instruction, 0, UnsafeVectorOverflow>&);
978
979 #if ENABLE(JIT)
980 void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
981 void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
982 #endif
983 WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
984 int m_numParameters;
985 union {
986 unsigned m_debuggerRequests;
987 struct {
988 unsigned m_hasDebuggerStatement : 1;
989 unsigned m_steppingMode : 1;
990 unsigned m_numBreakpoints : 30;
991 };
992 };
993 WriteBarrier<ScriptExecutable> m_ownerExecutable;
994 VM* m_vm;
995
996 RefCountedArray<Instruction> m_instructions;
997 WriteBarrier<SymbolTable> m_symbolTable;
998 VirtualRegister m_thisRegister;
999 VirtualRegister m_scopeRegister;
1000 VirtualRegister m_lexicalEnvironmentRegister;
1001
1002 bool m_isStrictMode;
1003 bool m_needsActivation;
1004 bool m_mayBeExecuting;
1005 Atomic<bool> m_visitAggregateHasBeenCalled;
1006
1007 RefPtr<SourceProvider> m_source;
1008 unsigned m_sourceOffset;
1009 unsigned m_firstLineColumnOffset;
1010 unsigned m_codeType;
1011
1012 Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
1013 SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
1014 RefPtr<JITCode> m_jitCode;
1015 #if ENABLE(JIT)
1016 Bag<StructureStubInfo> m_stubInfos;
1017 Vector<ByValInfo> m_byValInfos;
1018 Bag<CallLinkInfo> m_callLinkInfos;
1019 SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
1020 SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
1021 #endif
1022 std::unique_ptr<CompactJITCodeMap> m_jitCodeMap;
1023 #if ENABLE(DFG_JIT)
1024 // This is relevant to non-DFG code blocks that serve as the profiled code block
1025 // for DFG code blocks.
1026 DFG::ExitProfile m_exitProfile;
1027 CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
1028 #endif
1029 Vector<ValueProfile> m_argumentValueProfiles;
1030 Vector<ValueProfile> m_valueProfiles;
1031 SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
1032 SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
1033 Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
1034 ArrayProfileVector m_arrayProfiles;
1035 Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
1036
1037 // Constant Pool
1038 COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
1039 // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
1040 // it, so we're stuck with it for now.
1041 Vector<WriteBarrier<Unknown>> m_constantRegisters;
1042 Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
1043 Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
1044 Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;
1045
1046 RefPtr<CodeBlock> m_alternative;
1047
1048 BaselineExecutionCounter m_llintExecuteCounter;
1049
1050 BaselineExecutionCounter m_jitExecuteCounter;
1051 int32_t m_totalJITExecutions;
1052 uint32_t m_osrExitCounter;
1053 uint16_t m_optimizationDelayCounter;
1054 uint16_t m_reoptimizationRetryCounter;
1055
1056 mutable CodeBlockHash m_hash;
1057
1058 std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
1059
1060 std::unique_ptr<RareData> m_rareData;
1061 #if ENABLE(JIT)
1062 DFG::CapabilityLevel m_capabilityLevelState;
1063 #endif
1064 };
1065
1066 // Program code is not marked by any function, so we make the global object
1067 // responsible for marking it.
1068
1069 class GlobalCodeBlock : public CodeBlock {
1070 protected:
1071 GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
1072 : CodeBlock(CopyParsedBlock, other)
1073 {
1074 }
1075
1076 GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1077 : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
1078 {
1079 }
1080 };
1081
1082 class ProgramCodeBlock : public GlobalCodeBlock {
1083 public:
1084 ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
1085 : GlobalCodeBlock(CopyParsedBlock, other)
1086 {
1087 }
1088
1089 ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
1090 : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
1091 {
1092 }
1093
1094 #if ENABLE(JIT)
1095 protected:
1096 virtual CodeBlock* replacement() override;
1097 virtual DFG::CapabilityLevel capabilityLevelInternal() override;
1098 #endif
1099 };
1100
1101 class EvalCodeBlock : public GlobalCodeBlock {
1102 public:
1103 EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
1104 : GlobalCodeBlock(CopyParsedBlock, other)
1105 {
1106 }
1107
1108 EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
1109 : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
1110 {
1111 }
1112
1113 const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
1114 unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }
1115
1116 #if ENABLE(JIT)
1117 protected:
1118 virtual CodeBlock* replacement() override;
1119 virtual DFG::CapabilityLevel capabilityLevelInternal() override;
1120 #endif
1121
1122 private:
1123 UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
1124 };
1125
1126 class FunctionCodeBlock : public CodeBlock {
1127 public:
1128 FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
1129 : CodeBlock(CopyParsedBlock, other)
1130 {
1131 }
1132
1133 FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1134 : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
1135 {
1136 }
1137
1138 #if ENABLE(JIT)
1139 protected:
1140 virtual CodeBlock* replacement() override;
1141 virtual DFG::CapabilityLevel capabilityLevelInternal() override;
1142 #endif
1143 };
1144
1145 inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
1146 {
1147 RELEASE_ASSERT(inlineCallFrame);
1148 ExecutableBase* executable = inlineCallFrame->executable.get();
1149 RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
1150 return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->specializationKind());
1151 }
1152
1153 inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
1154 {
1155 if (codeOrigin.inlineCallFrame)
1156 return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
1157 return baselineCodeBlock;
1158 }
1159
1160 inline Register& ExecState::r(int index)
1161 {
1162 CodeBlock* codeBlock = this->codeBlock();
1163 if (codeBlock->isConstantRegisterIndex(index))
1164 return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
1165 return this[index];
1166 }
1167
1168 inline Register& ExecState::r(VirtualRegister reg)
1169 {
1170 return r(reg.offset());
1171 }
1172
1173 inline Register& ExecState::uncheckedR(int index)
1174 {
1175 RELEASE_ASSERT(index < FirstConstantRegisterIndex);
1176 return this[index];
1177 }
1178
1179 inline Register& ExecState::uncheckedR(VirtualRegister reg)
1180 {
1181 return uncheckedR(reg.offset());
1182 }
1183
1184 inline void CodeBlockSet::mark(void* candidateCodeBlock)
1185 {
1186 // We have to check for 0 and -1 because those are used by the HashMap as markers.
1187 uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
1188
1189 // This checks for both of those nasty cases in one go.
1190 // 0 + 1 = 1
1191 // -1 + 1 = 0
1192 if (value + 1 <= 1)
1193 return;
1194
1195 CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
1196 if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
1197 return;
1198
1199 mark(codeBlock);
1200 }
1201
1202 inline void CodeBlockSet::mark(CodeBlock* codeBlock)
1203 {
1204 if (!codeBlock)
1205 return;
1206
1207 if (codeBlock->m_mayBeExecuting)
1208 return;
1209
1210 codeBlock->m_mayBeExecuting = true;
1211 // We might not have cleared the marks for this CodeBlock, but we need to visit it.
1212 codeBlock->m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
1213 #if ENABLE(GGC)
1214 m_currentlyExecuting.append(codeBlock);
1215 #endif
1216 }
1217
1218 template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
1219 {
1220 switch (type()) {
1221 case ProgramExecutableType: {
1222 if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
1223 codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
1224 break;
1225 }
1226
1227 case EvalExecutableType: {
1228 if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
1229 codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
1230 break;
1231 }
1232
1233 case FunctionExecutableType: {
1234 Functor f(std::forward<Functor>(functor));
1235 FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
1236 if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
1237 codeBlock->forEachRelatedCodeBlock(f);
1238 if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
1239 codeBlock->forEachRelatedCodeBlock(f);
1240 break;
1241 }
1242 default:
1243 RELEASE_ASSERT_NOT_REACHED();
1244 }
1245 }
1246
1247 } // namespace JSC
1248
1249 #endif // CodeBlock_h