/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "BytecodeLivenessAnalysis.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CompactJITCodeMap.h"
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "ObjectAllocationProfile.h"
#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "ProfilerCompilation.h"
#include "ProfilerJettisonReason.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/FastMalloc.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>
namespace JSC {

class LLIntOffsetsExtractor;

inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };
class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class BytecodeLivenessAnalysis;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };

    CodeBlock(CopyParsedBlockTag, CodeBlock& other);

    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);

    WriteBarrier<JSGlobalObject> m_globalObject;

    JS_EXPORT_PRIVATE virtual ~CodeBlock();
    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString hashAsStringIfPossible() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
    {
        Functor f(std::forward<Functor>(functor));
        Vector<CodeBlock*, 4> codeBlocks;
        codeBlocks.append(this);

        while (!codeBlocks.isEmpty()) {
            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
            f(currentCodeBlock);

            if (CodeBlock* alternative = currentCodeBlock->alternative())
                codeBlocks.append(alternative);
            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
                codeBlocks.append(osrEntryBlock);
        }
    }
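
    // Illustrative usage sketch (an assumption, not part of this header's API): callers can
    // pass any functor taking a CodeBlock*, e.g. to count this block and everything related
    // to it (alternatives and OSR entry blocks).
    //
    //     unsigned relatedBlocks = 0;
    //     codeBlock->forEachRelatedCodeBlock([&] (CodeBlock*) { relatedBlocks++; });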
    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }

    CodeBlock* baselineAlternative();

    // FIXME: Get rid of this.
    // https://bugs.webkit.org/show_bug.cgi?id=123677
    CodeBlock* baselineVersion();

    void visitAggregate(SlotVisitor&);

    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(
        PrintStream&, unsigned bytecodeOffset,
        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    bool isStrictMode() const { return m_isStrictMode; }
    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister.offset() && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }
    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column);

    void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
    void getStubInfoMap(StubInfoMap& result);

    void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
    void getCallLinkInfoMap(CallLinkInfoMap& result);
#if ENABLE(JIT)
    StructureStubInfo* addStubInfo();
    Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
    Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }

    void resetStub(StructureStubInfo&);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo* addCallLinkInfo();
    Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
    Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }

    // This is a slow function call used primarily for compiling OSR exits in the case
    // that there had been inlining. Chances are if you want to use this, you're really
    // looking for a CallLinkInfoMap to amortize the cost of calling this.
    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
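
    // Illustrative sketch (an assumption, not part of this header): when many bytecode
    // indices must be resolved, build the map once and query it, rather than calling the
    // slow per-index lookup above in a loop.
    //
    //     CallLinkInfoMap callLinkInfos;
    //     codeBlock->getCallLinkInfoMap(callLinkInfos);
    //     // ... then consult callLinkInfos for each bytecode index of interest.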
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);

    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)
    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);

    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }

    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }
    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() const { return m_instructions.size(); }

    int argumentIndexAfterCapture(size_t argument);

    bool hasSlowArguments();
    const SlowArgument* machineSlowArguments();

    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
    void install();

    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    PassRefPtr<CodeBlock> newReplacement();
    void setJITCode(PassRefPtr<JITCode> code)
    {
        ASSERT(m_heap->isDeferred());
        m_heap->reportExtraMemoryCost(code->size());
        ConcurrentJITLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = code;
    }
    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }
    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
    DFG::CapabilityLevel capabilityLevel();
    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }

    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType

    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization);

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
    VirtualRegister thisRegister() const { return m_thisRegister; }

    bool usesEval() const { return m_unlinkedCode->usesEval(); }
    void setArgumentsRegister(VirtualRegister argumentsRegister)
    {
        ASSERT(argumentsRegister.isValid());
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    VirtualRegister argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    VirtualRegister uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return VirtualRegister();
        return argumentsRegister();
    }
    void setActivationRegister(VirtualRegister activationRegister)
    {
        m_activationRegister = activationRegister;
    }

    VirtualRegister activationRegister() const
    {
        ASSERT(m_activationRegister.isValid());
        return m_activationRegister;
    }

    VirtualRegister uncheckedActivationRegister()
    {
        return m_activationRegister;
    }

    bool usesArguments() const { return m_argumentsRegister.isValid(); }

    bool needsActivation() const
    {
        ASSERT(m_activationRegister.isValid() == m_needsActivation);
        return m_needsActivation;
    }
    unsigned captureCount() const
    {
        return symbolTable()->captureCount();
    }

    int captureStart() const
    {
        return symbolTable()->captureStart();
    }

    int captureEnd() const
    {
        return symbolTable()->captureEnd();
    }

    bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;

    int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
    int framePointerOffsetToGetActivationRegisters();

    CodeType codeType() const { return m_unlinkedCode->codeType(); }
    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }
    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void clearEvalCache();

    String nameForRegister(VirtualRegister);

    void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }
    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }

    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
    }
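
    // Illustrative sketch (an assumption, not part of this header): a compilation thread
    // reading predictions should hold the CodeBlock's lock, since the LLInt and baseline
    // JIT keep mutating the value profiles concurrently.
    //
    //     ConcurrentJITLocker locker(codeBlock->m_lock);
    //     SpeculatedType prediction =
    //         codeBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeOffset);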
    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }
    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }
    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }
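
    // Worked example (illustrative; the threshold value is an assumption): with
    // Options::likelyToTakeSlowCaseMinimumCount() == 100, a bytecode whose rare-case
    // counter is 150 and whose special-fast-case counter is 120 yields
    // likelyToTakeDeepestSlowCase() == false (150 - 120 = 30 < 100), but
    // likelyToTakeAnySlowCase() == true (150 + 120 = 270 >= 100).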
    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        return m_jitCode->dfgCommon()->codeOrigins;
    }

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }

    bool canGetCodeOrigin(unsigned index)
    {
        if (!hasCodeOrigins())
            return false;
        return index < codeOrigins().size();
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        return codeOrigins()[index];
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(jitType()));
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.add(locker, site);
    }

    bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
    {
        return m_exitProfile.hasExitSite(locker, site);
    }

    bool hasExitSite(const DFG::FrequentExitSite& site) const
    {
        ConcurrentJITLocker locker(m_lock);
        return hasExitSite(locker, site);
    }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif // ENABLE(DFG_JIT)
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const
    {
        if (!JITCode::isOptimizingJIT(jitType()))
            return 0;

        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
    }

    const Identifier& identifier(int index) const
    {
        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
            return m_unlinkedCode->identifier(index);
        ASSERT(JITCode::isOptimizingJIT(jitType()));
        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
    }
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif
    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        return result;
    }

    bool findConstant(JSValue, unsigned& result);
    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }
    Heap* heap() const { return m_heap; }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    BytecodeLivenessAnalysis& livenessAnalysis()
    {
        {
            ConcurrentJITLocker locker(m_lock);
            if (!!m_livenessAnalysis)
                return *m_livenessAnalysis;
        }
        std::unique_ptr<BytecodeLivenessAnalysis> analysis =
            std::make_unique<BytecodeLivenessAnalysis>(this);
        {
            ConcurrentJITLocker locker(m_lock);
            if (!m_livenessAnalysis)
                m_livenessAnalysis = WTF::move(analysis);
            return *m_livenessAnalysis;
        }
    }
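
    // Note on the locking pattern above (descriptive): the analysis is computed outside the
    // lock so other threads are not blocked on the potentially slow liveness computation;
    // the second locked section installs the result only if no other thread got there first,
    // so every caller ends up sharing a single BytecodeLivenessAnalysis instance.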
    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    SymbolTable* symbolTable() const { return m_symbolTable.get(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);

    // Functions for controlling when JITting kicks in, in a mixed mode execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const BaselineExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
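
    // Illustrative sketch (an assumption, not part of this header): conceptually, the LLInt's
    // tier-up check on function entry or loop back-edge reduces to:
    //
    //     if (codeBlock->checkIfJITThresholdReached()) {
    //         // hand this CodeBlock to the baseline JIT
    //     }
    //     // otherwise keep interpreting; the counter has been advanced by the call above.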
    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
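
    // Worked example (illustrative; the numbers are assumptions): if the base optimization
    // threshold were 1000 executions, then after one reoptimization the next trigger might
    // require on the order of 2000, after two reoptimizations 4000, and so on. A CodeBlock
    // that keeps exiting speculatively therefore spends progressively longer in baseline
    // code before the optimizing JIT is attempted again.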
#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }

    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();
    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge to
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();
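
    // Worked scenario (illustrative): a recursive function with a hot loop is entered to a
    // depth of, say, 1000 frames. The deepest frame's loop trigger fires and optimized code
    // is produced; the remaining 999 frames then unwind through baseline code. optimizeSoon()
    // keeps each of those returns from immediately re-firing the trigger, while future calls
    // are already relinked to the optimized code.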
    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);

    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();

    unsigned frameRegisterCount();
    int stackPointerOffset();

    bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);
    bool hasDebuggerRequests() const { return m_debuggerRequests; }
    void* debuggerRequestsAddress() { return &m_debuggerRequests; }

    void addBreakpoint(unsigned numBreakpoints);
    void removeBreakpoint(unsigned numBreakpoints)
    {
        ASSERT(m_numBreakpoints >= numBreakpoints);
        m_numBreakpoints -= numBreakpoints;
    }

    enum SteppingMode {
        SteppingModeDisabled,
        SteppingModeEnabled
    };
    void setSteppingMode(SteppingMode);

    void clearDebuggerRequests()
    {
        m_steppingMode = SteppingModeDisabled;
        m_numBreakpoints = 0;
    }
    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    int m_numVars;
    bool m_isConstructor : 1;

    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJITLock m_lock;
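
    // Illustrative sketch (an assumption, not part of this header): a concurrent compilation
    // thread snapshotting the inline caches takes this lock; only the main thread may mutate
    // them, and the main thread may read them without locking.
    //
    //     StubInfoMap stubInfos;
    //     {
    //         ConcurrentJITLocker locker(codeBlock->m_lock);
    //         codeBlock->getStubInfoMap(locker, stubInfos);
    //     }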
    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.

    bool m_didFailFTLCompilation : 1;
    bool m_hasBeenCompiledWithFTL : 1;

    // Internal methods for use by validation code. It would be private if it wasn't
    // for the fact that we use it from anonymous namespaces.
    void beginValidationDidFail();
    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();

    bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live.

protected:
    virtual void visitWeakReferences(SlotVisitor&) override;
    virtual void finalizeUnconditionally() override;

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif
private:
    friend class CodeBlockSet;

    CodeBlock* specialOSREntryBlockOrNull();

    void noticeIncomingCall(ExecState* callerFrame);

    double optimizationThresholdScalingFactor();

    ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);

    void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);

    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
    {
        size_t count = constants.size();
        m_constantRegisters.resize(count);
        for (size_t i = 0; i < count; i++)
            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
    }

    void dumpBytecode(
        PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
    CString registerName(int r) const;
    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
    enum CacheDumpMode { DumpCaches, DontDumpCaches };
    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);

    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);

    bool shouldImmediatelyAssumeLivenessDuringScan();

    void propagateTransitions(SlotVisitor&);
    void determineLiveness(SlotVisitor&);

    void stronglyVisitStrongReferences(SlotVisitor&);
    void stronglyVisitWeakReferences(SlotVisitor&);
    void createRareDataIfNecessary()
    {
        if (!m_rareData)
            m_rareData = adoptPtr(new RareData);
    }

    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    int m_numParameters;
    union {
        unsigned m_debuggerRequests;
        struct {
            unsigned m_hasDebuggerStatement : 1;
            unsigned m_steppingMode : 1;
            unsigned m_numBreakpoints : 30;
        };
    };
    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    VM* m_vm;
    Heap* m_heap;

    RefCountedArray<Instruction> m_instructions;
    WriteBarrier<SymbolTable> m_symbolTable;
    VirtualRegister m_thisRegister;
    VirtualRegister m_argumentsRegister;
    VirtualRegister m_activationRegister;

    bool m_isStrictMode;
    bool m_needsActivation;
    bool m_mayBeExecuting;
    uint8_t m_visitAggregateHasBeenCalled;

    RefPtr<SourceProvider> m_source;
    unsigned m_sourceOffset;
    unsigned m_firstLineColumnOffset;
    unsigned m_codeType;

    Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
    RefPtr<JITCode> m_jitCode;
    Bag<StructureStubInfo> m_stubInfos;
    Vector<ByValInfo> m_byValInfos;
    Bag<CallLinkInfo> m_callLinkInfos;
    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    DFG::ExitProfile m_exitProfile;
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;

    Vector<ValueProfile> m_argumentValueProfiles;
    Vector<ValueProfile> m_valueProfiles;
    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
    Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
    ArrayProfileVector m_arrayProfiles;
    Vector<ObjectAllocationProfile> m_objectAllocationProfiles;

    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
    // it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown>> m_constantRegisters;
    Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
    Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;

    RefPtr<CodeBlock> m_alternative;

    BaselineExecutionCounter m_llintExecuteCounter;

    BaselineExecutionCounter m_jitExecuteCounter;
    int32_t m_totalJITExecutions;
    uint32_t m_osrExitCounter;
    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    mutable CodeBlockHash m_hash;

    std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;
    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        // Buffers used for large array literals
        Vector<Vector<JSValue>> m_constantBuffers;

        Vector<SimpleJumpTable> m_switchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        EvalCodeCache m_evalCodeCache;
    };
    friend void WTF::deleteOwnedPtr<RareData>(RareData*);
    OwnPtr<RareData> m_rareData;

    DFG::CapabilityLevel m_capabilityLevelState;
};
// Program code is not marked by any function, so we make the global object
// responsible for marking it.

class GlobalCodeBlock : public CodeBlock {
protected:
    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
    {
    }
};
class ProgramCodeBlock : public GlobalCodeBlock {
public:
    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
    {
    }

protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
};
class EvalCodeBlock : public GlobalCodeBlock {
public:
    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
    {
    }

    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }

protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;

private:
    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
};
class FunctionCodeBlock : public CodeBlock {
public:
    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
    {
    }

protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
};
inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
    RELEASE_ASSERT(inlineCallFrame);
    ExecutableBase* executable = inlineCallFrame->executable.get();
    RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}

inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
{
    if (codeOrigin.inlineCallFrame)
        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    return baselineCodeBlock;
}
inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
{
    if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
        return CallFrame::argumentOffset(argument);

    const SlowArgument* slowArguments = symbolTable()->slowArguments();
    if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
        return CallFrame::argumentOffset(argument);

    ASSERT(slowArguments[argument].status == SlowArgument::Captured);
    return slowArguments[argument].index;
}

inline bool CodeBlock::hasSlowArguments()
{
    return !!symbolTable()->slowArguments();
}
inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}

inline Register& ExecState::uncheckedR(int index)
{
    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}

inline JSValue ExecState::argumentAfterCapture(size_t argument)
{
    if (argument >= argumentCount())
        return jsUndefined();

    if (!codeBlock())
        return this[argumentOffset(argument)].jsValue();

    return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
}
inline void CodeBlockSet::mark(void* candidateCodeBlock)
{
    // We have to check for 0 and -1 because those are used by the HashMap as markers.
    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);

    // This checks for both of those nasty cases in one go.
    if (value + 1 <= 1)
        return;

    CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
    if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
        return;

    mark(codeBlock);
}

inline void CodeBlockSet::mark(CodeBlock* codeBlock)
{
    if (!codeBlock)
        return;

    if (codeBlock->m_mayBeExecuting)
        return;

    codeBlock->m_mayBeExecuting = true;
    // We might not have cleared the marks for this CodeBlock, but we need to visit it.
    codeBlock->m_visitAggregateHasBeenCalled = false;
    m_currentlyExecuting.append(codeBlock);
}
template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
{
    switch (type()) {
    case ProgramExecutableType: {
        if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
        break;
    }

    case EvalExecutableType: {
        if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
        break;
    }

    case FunctionExecutableType: {
        Functor f(std::forward<Functor>(functor));
        FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
        if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
            codeBlock->forEachRelatedCodeBlock(f);
        if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
            codeBlock->forEachRelatedCodeBlock(f);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
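
// Illustrative usage sketch (an assumption, not part of this header): applying an operation
// to every CodeBlock an executable owns, including alternatives and OSR entry blocks.
//
//     executable->forEachCodeBlock([] (CodeBlock* codeBlock) {
//         dataLog("code block: ", *codeBlock, "\n"); // assumes CodeBlock's dump() makes it printable via dataLog
//     });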
} // namespace JSC

#endif // CodeBlock_h