/*
 * Copyright (C) 2008-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "BytecodeLivenessAnalysis.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CompactJITCodeMap.h"
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "ObjectAllocationProfile.h"
#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "ProfilerCompilation.h"
#include "ProfilerJettisonReason.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/FastMalloc.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {
class LLIntOffsetsExtractor;

enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class BytecodeLivenessAnalysis;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other);
    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);

    WriteBarrier<JSGlobalObject> m_globalObject;
    Heap* m_heap;

public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString hashAsStringIfPossible() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;
    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
    {
        Functor f(std::forward<Functor>(functor));
        Vector<CodeBlock*, 4> codeBlocks;
        codeBlocks.append(this);

        while (!codeBlocks.isEmpty()) {
            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
            f(currentCodeBlock);

            if (CodeBlock* alternative = currentCodeBlock->alternative())
                codeBlocks.append(alternative);
            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
                codeBlocks.append(osrEntryBlock);
        }
    }

    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }

    CodeBlock* baselineAlternative();

    // FIXME: Get rid of this.
    // https://bugs.webkit.org/show_bug.cgi?id=123677
    CodeBlock* baselineVersion();
    void visitAggregate(SlotVisitor&);

    void dumpSource(PrintStream&);
    void dumpBytecode(PrintStream&);
    void dumpBytecode(
        PrintStream&, unsigned bytecodeOffset,
        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
    bool isStrictMode() const { return m_isStrictMode; }
    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister.offset() && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }
    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    enum class RequiredHandler {
        CatchHandler,
        AnyHandler
    };
    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column);

    void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
    void getStubInfoMap(StubInfoMap& result);

    void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
    void getCallLinkInfoMap(CallLinkInfoMap& result);
#if ENABLE(JIT)
    StructureStubInfo* addStubInfo();
    Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
    Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }

    // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
    // stub info.
    StructureStubInfo* findStubInfo(CodeOrigin);

    void resetStub(StructureStubInfo&);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo* addCallLinkInfo();
    Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
    Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }

    // This is a slow function call used primarily for compiling OSR exits in the case
    // that there had been inlining. Chances are if you want to use this, you're really
    // looking for a CallLinkInfoMap to amortize the cost of calling this.
    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
#endif // ENABLE(JIT)
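
    // Illustrative sketch (not part of the original header): as the comment
    // above suggests, repeated lookups are better served by building the map
    // once via getCallLinkInfoMap() than by calling the slow per-index
    // accessor in a loop. The `codeBlock` and `map` names are hypothetical.
    //
    //     CallLinkInfoMap map;
    //     codeBlock->getCallLinkInfoMap(map);
    //     // ... then consult `map` for each bytecode index of interest.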

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
    void linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode*);
#endif // ENABLE(JIT)

    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
    void setJITCodeMap(std::unique_ptr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = WTF::move(jitCodeMap);
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }

    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() const { return m_instructions.size(); }

    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
    void install();

    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    PassRefPtr<CodeBlock> newReplacement();
    void setJITCode(PassRefPtr<JITCode> code)
    {
        ASSERT(m_heap->isDeferred());
        m_heap->reportExtraMemoryAllocated(code->size());
        ConcurrentJITLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = code;
    }
    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }

    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
    DFG::CapabilityLevel capabilityLevel();
    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }

    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType

    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr);

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
    VirtualRegister thisRegister() const { return m_thisRegister; }

    bool usesEval() const { return m_unlinkedCode->usesEval(); }
    void setScopeRegister(VirtualRegister scopeRegister)
    {
        ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid());
        m_scopeRegister = scopeRegister;
    }

    VirtualRegister scopeRegister() const
    {
        return m_scopeRegister;
    }

    void setActivationRegister(VirtualRegister activationRegister)
    {
        m_lexicalEnvironmentRegister = activationRegister;
    }

    VirtualRegister activationRegister() const
    {
        ASSERT(m_lexicalEnvironmentRegister.isValid());
        return m_lexicalEnvironmentRegister;
    }

    VirtualRegister uncheckedActivationRegister()
    {
        return m_lexicalEnvironmentRegister;
    }

    bool needsActivation() const
    {
        ASSERT(m_lexicalEnvironmentRegister.isValid() == m_needsActivation);
        return m_needsActivation;
    }

    CodeType codeType() const { return m_unlinkedCode->codeType(); }
    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void clearEvalCache();

    String nameForRegister(VirtualRegister);

    void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }

    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset);
    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
    }

    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }

    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        return m_jitCode->dfgCommon()->codeOrigins;
    }

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }

    bool canGetCodeOrigin(unsigned index)
    {
        if (!hasCodeOrigins())
            return false;
        return index < codeOrigins().size();
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        return codeOrigins()[index];
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(jitType()));
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.add(locker, site);
    }

    bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
    {
        return m_exitProfile.hasExitSite(locker, site);
    }

    bool hasExitSite(const DFG::FrequentExitSite& site) const
    {
        ConcurrentJITLocker locker(m_lock);
        return hasExitSite(locker, site);
    }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif // ENABLE(DFG_JIT)

#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const
    {
        if (!JITCode::isOptimizingJIT(jitType()))
            return 0;

        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
    }

    const Identifier& identifier(int index) const
    {
        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
            return m_unlinkedCode->identifier(index);
        ASSERT(JITCode::isOptimizingJIT(jitType()));
        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
    }
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other);
        return result;
    }

    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
    ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }

    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }

    Heap* heap() const { return m_heap; }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    BytecodeLivenessAnalysis& livenessAnalysis()
    {
        {
            ConcurrentJITLocker locker(m_lock);
            if (!!m_livenessAnalysis)
                return *m_livenessAnalysis;
        }
        std::unique_ptr<BytecodeLivenessAnalysis> analysis =
            std::make_unique<BytecodeLivenessAnalysis>(this);
        {
            ConcurrentJITLocker locker(m_lock);
            if (!m_livenessAnalysis)
                m_livenessAnalysis = WTF::move(analysis);
            return *m_livenessAnalysis;
        }
    }

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    SymbolTable* symbolTable() const { return m_symbolTable.get(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const BaselineExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }

#if ENABLE(JIT)
    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
    void countReoptimization();

    unsigned numberOfDFGCompiles();
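
    // Illustrative sketch (not part of the original header): how a caller
    // holding a hypothetical `codeBlock` might consult the trigger described
    // above. checkIfOptimizationThresholdReached() is declared below.
    //
    //     if (codeBlock->checkIfOptimizationThresholdReached()) {
    //         // The execute counter crossed zero: request an optimizing
    //         // compile; on the loop trigger, OSR entry may follow.
    //     }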

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }

    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);

    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();

    unsigned frameRegisterCount();
    int stackPointerOffset();

    bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);

    bool hasDebuggerRequests() const { return m_debuggerRequests; }
    void* debuggerRequestsAddress() { return &m_debuggerRequests; }

    void addBreakpoint(unsigned numBreakpoints);
    void removeBreakpoint(unsigned numBreakpoints)
    {
        ASSERT(m_numBreakpoints >= numBreakpoints);
        m_numBreakpoints -= numBreakpoints;
    }

    enum SteppingMode {
        SteppingModeDisabled,
        SteppingModeEnabled
    };
    void setSteppingMode(SteppingMode);

    void clearDebuggerRequests()
    {
        m_steppingMode = SteppingModeDisabled;
        m_numBreakpoints = 0;
    }

    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    int m_numVars;
    bool m_isConstructor : 1;

    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJITLock m_lock;
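
    // Illustrative sketch (not part of the original header): the locking
    // discipline described above, for a non-main thread querying inline
    // caches. The `codeBlock` name is hypothetical; getStubInfoMap() is
    // declared earlier in this class.
    //
    //     ConcurrentJITLocker locker(codeBlock->m_lock);
    //     StubInfoMap stubInfos;
    //     codeBlock->getStubInfoMap(locker, stubInfos);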

    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.

    bool m_didFailFTLCompilation : 1;
    bool m_hasBeenCompiledWithFTL : 1;

    // Internal methods for use by validation code. It would be private if it wasn't
    // for the fact that we use it from anonymous namespaces.
    void beginValidationDidFail();
    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();

    bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live.

    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        // Buffers used for large array literals
        Vector<Vector<JSValue>> m_constantBuffers;

        Vector<SimpleJumpTable> m_switchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        EvalCodeCache m_evalCodeCache;
    };

protected:
    virtual void visitWeakReferences(SlotVisitor&) override;
    virtual void finalizeUnconditionally() override;

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class CodeBlockSet;

    CodeBlock* specialOSREntryBlockOrNull();

    void noticeIncomingCall(ExecState* callerFrame);

    double optimizationThresholdScalingFactor();

    void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);

    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation)
    {
        ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
        size_t count = constants.size();
        m_constantRegisters.resizeToFit(count);
        for (size_t i = 0; i < count; i++)
            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
        m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
    }

    void dumpBytecode(
        PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
    CString registerName(int r) const;
    CString constantName(int index) const;
    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
    enum CacheDumpMode { DumpCaches, DontDumpCaches };
    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printPutByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
    void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);

    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);

    bool shouldImmediatelyAssumeLivenessDuringScan();

    void propagateTransitions(SlotVisitor&);
    void determineLiveness(SlotVisitor&);

    void stronglyVisitStrongReferences(SlotVisitor&);
    void stronglyVisitWeakReferences(SlotVisitor&);

    void createRareDataIfNecessary()
    {
        if (!m_rareData)
            m_rareData = std::make_unique<RareData>();
    }

    void insertBasicBlockBoundariesForControlFlowProfiler(Vector<Instruction, 0, UnsafeVectorOverflow>&);

    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);

    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    int m_numParameters;
    union {
        unsigned m_debuggerRequests;
        struct {
            unsigned m_hasDebuggerStatement : 1;
            unsigned m_steppingMode : 1;
            unsigned m_numBreakpoints : 30;
        };
    };
    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    VM* m_vm;

    RefCountedArray<Instruction> m_instructions;
    WriteBarrier<SymbolTable> m_symbolTable;
    VirtualRegister m_thisRegister;
    VirtualRegister m_scopeRegister;
    VirtualRegister m_lexicalEnvironmentRegister;

    bool m_isStrictMode;
    bool m_needsActivation;
    bool m_mayBeExecuting;
    Atomic<bool> m_visitAggregateHasBeenCalled;

    RefPtr<SourceProvider> m_source;
    unsigned m_sourceOffset;
    unsigned m_firstLineColumnOffset;
    unsigned m_codeType;

    Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
    RefPtr<JITCode> m_jitCode;
    Bag<StructureStubInfo> m_stubInfos;
    Vector<ByValInfo> m_byValInfos;
    Bag<CallLinkInfo> m_callLinkInfos;
    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
    SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
    std::unique_ptr<CompactJITCodeMap> m_jitCodeMap;

    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    DFG::ExitProfile m_exitProfile;
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;

    Vector<ValueProfile> m_argumentValueProfiles;
    Vector<ValueProfile> m_valueProfiles;
    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
    Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
    ArrayProfileVector m_arrayProfiles;
    Vector<ObjectAllocationProfile> m_objectAllocationProfiles;

    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
    // it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown>> m_constantRegisters;
    Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation;
    Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
    Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;

    RefPtr<CodeBlock> m_alternative;

    BaselineExecutionCounter m_llintExecuteCounter;

    BaselineExecutionCounter m_jitExecuteCounter;
    int32_t m_totalJITExecutions;
    uint32_t m_osrExitCounter;
    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    mutable CodeBlockHash m_hash;

    std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;

    std::unique_ptr<RareData> m_rareData;

    DFG::CapabilityLevel m_capabilityLevelState;
};

// Program code is not marked by any function, so we make the global object
// responsible for marking it.

class GlobalCodeBlock : public CodeBlock {
protected:
    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
    {
    }
};

class ProgramCodeBlock : public GlobalCodeBlock {
public:
    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
    {
    }

protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
};

class EvalCodeBlock : public GlobalCodeBlock {
public:
    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
    {
    }

    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }

protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;

private:
    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
};

class FunctionCodeBlock : public CodeBlock {
public:
    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
    {
    }

protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
};

inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
    RELEASE_ASSERT(inlineCallFrame);
    ExecutableBase* executable = inlineCallFrame->executable.get();
    RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->specializationKind());
}

inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
{
    if (codeOrigin.inlineCallFrame)
        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    return baselineCodeBlock;
}

inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}

inline Register& ExecState::r(VirtualRegister reg)
{
    return r(reg.offset());
}

inline Register& ExecState::uncheckedR(int index)
{
    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}

inline Register& ExecState::uncheckedR(VirtualRegister reg)
{
    return uncheckedR(reg.offset());
}

inline void CodeBlockSet::mark(void* candidateCodeBlock)
{
    // We have to check for 0 and -1 because those are used by the HashMap as markers.
    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);

    // This checks for both of those nasty cases in one go.
    // 0 + 1 = 1
    // -1 + 1 = 0
    if (value + 1 <= 1)
        return;

    CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
    if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
        return;

    mark(codeBlock);
}

inline void CodeBlockSet::mark(CodeBlock* codeBlock)
{
    if (!codeBlock)
        return;

    if (codeBlock->m_mayBeExecuting)
        return;

    codeBlock->m_mayBeExecuting = true;
    // We might not have cleared the marks for this CodeBlock, but we need to visit it.
    codeBlock->m_visitAggregateHasBeenCalled.store(false, std::memory_order_relaxed);
    m_currentlyExecuting.append(codeBlock);
}

template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
{
    switch (type()) {
    case ProgramExecutableType: {
        if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
        break;
    }

    case EvalExecutableType: {
        if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
        break;
    }

    case FunctionExecutableType: {
        Functor f(std::forward<Functor>(functor));
        FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
        if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
            codeBlock->forEachRelatedCodeBlock(f);
        if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
            codeBlock->forEachRelatedCodeBlock(f);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

} // namespace JSC

#endif // CodeBlock_h