/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33 #include "ArrayProfile.h"
34 #include "ByValInfo.h"
35 #include "BytecodeConventions.h"
36 #include "CallLinkInfo.h"
37 #include "CallReturnOffsetToBytecodeOffset.h"
38 #include "CodeBlockHash.h"
39 #include "CodeOrigin.h"
41 #include "CompactJITCodeMap.h"
42 #include "DFGCodeBlocks.h"
43 #include "DFGCommon.h"
44 #include "DFGExitProfile.h"
45 #include "DFGMinifiedGraph.h"
46 #include "DFGOSREntry.h"
47 #include "DFGOSRExit.h"
48 #include "DFGVariableEventStream.h"
49 #include "EvalCodeCache.h"
50 #include "ExecutionCounter.h"
51 #include "ExpressionRangeInfo.h"
52 #include "HandlerInfo.h"
53 #include "ObjectAllocationProfile.h"
55 #include "Instruction.h"
57 #include "JITWriteBarrier.h"
58 #include "JSGlobalObject.h"
59 #include "JumpReplacementWatchpoint.h"
60 #include "JumpTable.h"
61 #include "LLIntCallLinkInfo.h"
62 #include "LazyOperandValueProfile.h"
64 #include "ProfilerCompilation.h"
65 #include "RegExpObject.h"
66 #include "ResolveOperation.h"
67 #include "StructureStubInfo.h"
68 #include "UnconditionalFinalizer.h"
69 #include "ValueProfile.h"
70 #include "Watchpoint.h"
71 #include <wtf/RefCountedArray.h>
72 #include <wtf/FastAllocBase.h>
73 #include <wtf/PassOwnPtr.h>
74 #include <wtf/Platform.h>
75 #include <wtf/RefPtr.h>
76 #include <wtf/SegmentedVector.h>
77 #include <wtf/Vector.h>
78 #include <wtf/text/WTFString.h>
class LLIntOffsetsExtractor;

inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class LLIntOffsetsExtractor;

    enum CopyParsedBlockTag { CopyParsedBlock };

    CodeBlock(CopyParsedBlockTag, CodeBlock& other);

    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSGlobalObject*, unsigned baseScopeDepth, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative);

    WriteBarrier<JSGlobalObject> m_globalObject;

    JS_EXPORT_PRIVATE virtual ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    String inferredName() const;
    CodeBlockHash hash() const;
    String sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    String sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
    CodeBlock* alternative() { return m_alternative.get(); }
    PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }

    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }
#if ENABLE(DFG_JIT)
    CodeBlock* baselineVersion()
    {
        CodeBlock* result = replacement();
        if (!result)
            return 0; // This can happen if we're in the process of creating the baseline version.
        while (result->alternative())
            result = result->alternative();
        ASSERT(JITCode::isBaselineCode(result->getJITType()));
        return result;
    }
#else
    CodeBlock* baselineVersion()
    {
        return this;
    }
#endif
    void visitAggregate(SlotVisitor&);

    static void dumpStatistics();

    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
    bool isStrictMode() const { return m_isStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }
    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column);
    StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<StructureStubInfo, void*>(m_structureStubInfos, m_structureStubInfos.size(), returnAddress.value(), getStructureStubInfoReturnLocation));
    }

    StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<StructureStubInfo, unsigned>(m_structureStubInfos, m_structureStubInfos.size(), bytecodeIndex, getStructureStubInfoBytecodeIndex));
    }

    void resetStub(StructureStubInfo&);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
    }

    CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
    {
        ASSERT(JITCode::isBaselineCode(getJITType()));
        return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
    }
#endif // ENABLE(JIT)
    Instruction* adjustPCIfAtCallSite(Instruction*);

    unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);

    unsigned bytecodeOffsetForCallAtIndex(unsigned index)
    {
        Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
        if (!callIndices.size())
            return 1;
        // FIXME: Fix places in DFG that call out to C that don't set the CodeOrigin. https://bugs.webkit.org/show_bug.cgi?id=118315
        ASSERT(index < m_rareData->m_callReturnIndexVector.size());
        if (index >= m_rareData->m_callReturnIndexVector.size())
            return 1;
        return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
    }
    bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }

    void linkIncomingCall(CallLinkInfo* incoming)
    {
        m_incomingCalls.push(incoming);
    }

    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)

    void linkIncomingCall(LLIntCallLinkInfo* incoming)
    {
        m_incomingLLIntCalls.push(incoming);
    }
#endif // ENABLE(LLINT)

    void unlinkIncomingCalls();

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }
    void createDFGDataIfNecessary()
    {
        if (m_dfgData)
            return;

        m_dfgData = adoptPtr(new DFGData);
    }

    void saveCompilation(PassRefPtr<Profiler::Compilation> compilation)
    {
        createDFGDataIfNecessary();
        m_dfgData->compilation = compilation;
    }

    Profiler::Compilation* compilation()
    {
        return m_dfgData->compilation.get();
    }
    DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
    {
        createDFGDataIfNecessary();
        DFG::OSREntryData entry;
        entry.m_bytecodeIndex = bytecodeIndex;
        entry.m_machineCodeOffset = machineCodeOffset;
        m_dfgData->osrEntry.append(entry);
        return &m_dfgData->osrEntry.last();
    }
    unsigned numberOfDFGOSREntries() const
    {
        return m_dfgData->osrEntry.size();
    }
    DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
    DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
    {
        return tryBinarySearch<DFG::OSREntryData, unsigned>(
            m_dfgData->osrEntry, m_dfgData->osrEntry.size(), bytecodeIndex,
            DFG::getOSREntryDataBytecodeIndex);
    }
    unsigned appendOSRExit(const DFG::OSRExit& osrExit)
    {
        createDFGDataIfNecessary();
        unsigned result = m_dfgData->osrExit.size();
        m_dfgData->osrExit.append(osrExit);
        return result;
    }

    DFG::OSRExit& lastOSRExit()
    {
        return m_dfgData->osrExit.last();
    }

    unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
    {
        createDFGDataIfNecessary();
        unsigned result = m_dfgData->speculationRecovery.size();
        m_dfgData->speculationRecovery.append(recovery);
        return result;
    }

    unsigned appendWatchpoint(const JumpReplacementWatchpoint& watchpoint)
    {
        createDFGDataIfNecessary();
        unsigned result = m_dfgData->watchpoints.size();
        m_dfgData->watchpoints.append(watchpoint);
        return result;
    }

    unsigned numberOfOSRExits()
    {
        return m_dfgData->osrExit.size();
    }
    unsigned numberOfSpeculationRecoveries()
    {
        return m_dfgData->speculationRecovery.size();
    }
    unsigned numberOfWatchpoints()
    {
        return m_dfgData->watchpoints.size();
    }

    DFG::OSRExit& osrExit(unsigned index)
    {
        return m_dfgData->osrExit[index];
    }
    DFG::SpeculationRecovery& speculationRecovery(unsigned index)
    {
        return m_dfgData->speculationRecovery[index];
    }
    JumpReplacementWatchpoint& watchpoint(unsigned index)
    {
        return m_dfgData->watchpoints[index];
    }
    void appendWeakReference(JSCell* target)
    {
        createDFGDataIfNecessary();
        m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*vm(), ownerExecutable(), target));
    }

    void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
    {
        createDFGDataIfNecessary();
        m_dfgData->transitions.append(
            WeakReferenceTransition(*vm(), ownerExecutable(), codeOrigin, from, to));
    }

    DFG::MinifiedGraph& minifiedDFG()
    {
        createDFGDataIfNecessary();
        return m_dfgData->minifiedDFG;
    }

    DFG::VariableEventStream& variableEventStream()
    {
        createDFGDataIfNecessary();
        return m_dfgData->variableEventStream;
    }
    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }

    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() { return m_instructions.size(); }

    int argumentIndexAfterCapture(size_t argument);
    void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
    {
        m_jitCode = code;
        m_jitCodeWithArityCheck = codeWithArityCheck;
        if (m_jitCode.jitType() == JITCode::DFGJIT) {
            createDFGDataIfNecessary();
            m_vm->heap.m_dfgCodeBlocks.m_set.add(this);
        }
    }
    JITCode& getJITCode() { return m_jitCode; }
    MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
    JITCode::JITType getJITType() const { return m_jitCode.jitType(); }
    ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex) = 0;
    enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
    JITCompilationResult jitCompile(ExecState* exec)
    {
        if (getJITType() != JITCode::InterpreterThunk) {
            ASSERT(getJITType() == JITCode::BaselineJIT);
            return AlreadyCompiled;
        }
#if ENABLE(JIT)
        if (jitCompileImpl(exec))
            return CompiledSuccessfully;
        return CouldNotCompile;
#else
        return CouldNotCompile;
#endif
    }
    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0;
    DFG::CapabilityLevel canCompileWithDFG()
    {
        DFG::CapabilityLevel result = canCompileWithDFGInternal();
        m_canCompileWithDFGState = result;
        return result;
    }
    DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; }
    bool hasOptimizedReplacement()
    {
        ASSERT(JITCode::isBaselineCode(getJITType()));
        bool result = replacement()->getJITType() > getJITType();
        if (result)
            ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
        else {
            ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
            ASSERT(replacement() == this);
        }
        return result;
    }

    JITCode::JITType getJITType() const { return JITCode::BaselineJIT; }
    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
    int thisRegister() const { return m_thisRegister; }

    bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
    bool usesEval() const { return m_unlinkedCode->usesEval(); }
    void setArgumentsRegister(int argumentsRegister)
    {
        ASSERT(argumentsRegister != -1);
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    int argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    int uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return InvalidVirtualRegister;
        return argumentsRegister();
    }
    void setActivationRegister(int activationRegister)
    {
        m_activationRegister = activationRegister;
    }
    int activationRegister() const
    {
        ASSERT(needsFullScopeChain());
        return m_activationRegister;
    }
    int uncheckedActivationRegister()
    {
        if (!needsFullScopeChain())
            return InvalidVirtualRegister;
        return activationRegister();
    }
    bool usesArguments() const { return m_argumentsRegister != -1; }
    bool needsActivation() const
    {
        return needsFullScopeChain() && codeType() != GlobalCode;
    }

    bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const
    {
        if (operandIsArgument(operand))
            return operandToArgument(operand) && usesArguments();

        if (inlineCallFrame)
            return inlineCallFrame->capturedVars.get(operand);

        // The activation object isn't in the captured region, but it's "captured"
        // in the sense that stores to its location can be observed indirectly.
        if (needsActivation() && operand == activationRegister())
            return true;

        // Ditto for the arguments object.
        if (usesArguments() && operand == argumentsRegister())
            return true;

        // Ditto for the arguments object.
        if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
            return true;

        // We're in global code so there are no locals to capture.
        if (!symbolTable())
            return false;

        return operand >= symbolTable()->captureStart()
            && operand < symbolTable()->captureEnd();
    }
    CodeType codeType() const { return m_unlinkedCode->codeType(); }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void createActivation(CallFrame*);

    void clearEvalCache();

    String nameForRegister(int registerNumber);
    void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
    size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
    StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }

    void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }

    void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
    size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
    CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#if ENABLE(VALUE_PROFILER)
    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
    }
    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }
    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }
* addSpecialFastCaseProfile(int bytecodeOffset
)
675 m_specialFastCaseProfiles
.append(RareCaseProfile(bytecodeOffset
));
676 return &m_specialFastCaseProfiles
.last();
678 unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles
.size(); }
679 RareCaseProfile
* specialFastCaseProfile(int index
) { return &m_specialFastCaseProfiles
[index
]; }
680 RareCaseProfile
* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset
)
682 return tryBinarySearch
<RareCaseProfile
, int>(
683 m_specialFastCaseProfiles
, m_specialFastCaseProfiles
.size(), bytecodeOffset
,
684 getRareCaseProfileBytecodeOffset
);
    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }
    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
    {
        size_t count = unlinkedHandlers.size();
        createRareDataIfNecessary();
        m_rareData->m_exceptionHandlers.resize(count);
        for (size_t i = 0; i < count; ++i) {
            m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
            m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
            m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
            m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
        }
    }

    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
    Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callReturnIndexVector()
    {
        createRareDataIfNecessary();
        return m_rareData->m_callReturnIndexVector;
    }

    SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
    {
        createRareDataIfNecessary();
        return m_rareData->m_inlineCallFrames;
    }

    Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        createRareDataIfNecessary();
        return m_rareData->m_codeOrigins;
    }
    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return m_rareData && !!m_rareData->m_codeOrigins.size();
    }

    bool codeOriginForReturn(ReturnAddressPtr, CodeOrigin&);

    bool canGetCodeOrigin(unsigned index)
    {
        if (!m_rareData)
            return false;
        return m_rareData->m_codeOrigins.size() > index;
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        RELEASE_ASSERT(m_rareData);
        return m_rareData->m_codeOrigins[index].codeOrigin;
    }
    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(getJITType()));
        return m_exitProfile.add(site);
    }

    bool hasExitSite(const DFG::FrequentExitSite& site) const { return m_exitProfile.hasExitSite(site); }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
    size_t numberOfIdentifiers() const { return m_identifiers.size(); }
    void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
    Identifier& identifier(int index) { return m_identifiers[index]; }

    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }

    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }

    JSGlobalObject* globalObject() { return m_globalObject.get(); }
    JSGlobalObject* globalObjectFor(CodeOrigin);

    size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
    SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
    SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }

    size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
    SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
    SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);

    void copyPostParseDataFrom(CodeBlock* alternative);
    void copyPostParseDataFromAlternative();
    // Functions for controlling when JITting kicks in, in a mixed-mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const ExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
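
    // Illustrative sketch (not a member of this class; names here are hypothetical):
    // how an interpreter slow path might consult the counter above to tier up from
    // the LLInt to the baseline JIT.
    //
    //     void llintTierUpCheck(CodeBlock* codeBlock, ExecState* exec)
    //     {
    //         if (!codeBlock->checkIfJITThresholdReached())
    //             return; // Not warm enough yet; keep interpreting.
    //         codeBlock->jitCompile(exec); // Compile with the baseline JIT, if possible.
    //     }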
    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.
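    //
    // As a rough sketch of the trigger itself (the actual counting code is emitted
    // by the JITs and the threshold logic lives in ExecutionCounter, not here):
    //
    //     m_jitExecuteCounter.m_counter += weightOfWhatJustExecuted; // hypothetical weight
    //     if (m_jitExecuteCounter.m_counter >= 0)
    //         consider invoking the optimizing compiler (and, for the loop trigger, OSR entry);
    //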
    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    unsigned reoptimizationRetryCounter() const;
    void countReoptimization();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t counterValueForOptimizeAfterWarmUp();
    int32_t counterValueForOptimizeAfterLongWarmUp();
    int32_t counterValueForOptimizeSoon();

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

    const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();
    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();
    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();
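
    // Rough intent of the knobs above (a sketch, not normative):
    //   optimizeNextInvocation()  - force the trigger to fire at the very next check.
    //   optimizeAfterWarmUp()     - reset the counter so a full warm-up must happen first.
    //   optimizeAfterLongWarmUp() - like optimizeAfterWarmUp(), but with a larger threshold.
    //   optimizeSoon()            - fire soon, but only if call frames still executing this
    //                               CodeBlock keep running enough baseline code.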
    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
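
    // Illustrative sketch (hypothetical caller, not part of this class): how an OSR
    // exit handler might consult the counters above when deciding whether to throw
    // away the optimized code and re-warm the baseline CodeBlock.
    //
    //     dfgCodeBlock->countOSRExit();
    //     if (dfgCodeBlock->shouldReoptimizeNow()) {
    //         baselineCodeBlock->countReoptimization();
    //         baselineCodeBlock->optimizeAfterWarmUp();
    //         // ... jettison the optimized CodeBlock ...
    //     }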
#if ENABLE(VALUE_PROFILER)
    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions(OperationInProgress = NoOperation);
    void updateAllArrayPredictions(OperationInProgress = NoOperation);
    void updateAllPredictions(OperationInProgress = NoOperation);
#else
    bool shouldOptimizeNow() { return false; }
    void updateAllValueProfilePredictions(OperationInProgress = NoOperation) { }
    void updateAllArrayPredictions(OperationInProgress = NoOperation) { }
    void updateAllPredictions(OperationInProgress = NoOperation) { }
#endif

#if ENABLE(VERBOSE_VALUE_PROFILE)
    void dumpValueProfiles();
    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    bool m_isConstructor;

    virtual bool jitCompileImpl(ExecState*) = 0;
    virtual void jettisonImpl() = 0;
    virtual void visitWeakReferences(SlotVisitor&);
    virtual void finalizeUnconditionally();

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

    friend class DFGCodeBlocks;

    double optimizationThresholdScalingFactor();

    ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);

#if ENABLE(VALUE_PROFILER)
    void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
    void setIdentifiers(const Vector<Identifier>& identifiers)
    {
        RELEASE_ASSERT(m_identifiers.isEmpty());
        m_identifiers.appendVector(identifiers);
    }

    void setConstantRegisters(const Vector<WriteBarrier<Unknown> >& constants)
    {
        size_t count = constants.size();
        m_constantRegisters.resize(count);
        for (size_t i = 0; i < count; i++)
            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
    }
    void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&);

    CString registerName(ExecState*, int r) const;
    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location);
    enum CacheDumpMode { DumpCaches, DontDumpCaches };
    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode);
    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
#if ENABLE(VALUE_PROFILER)
    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);

    void visitStructures(SlotVisitor&, Instruction* vPC);
#if ENABLE(DFG_JIT)
    bool shouldImmediatelyAssumeLivenessDuringScan()
    {
        // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
        // CodeBlocks don't need to be jettisoned when their weak references go
        // stale. So if a baseline JIT CodeBlock gets scanned, we can assume that
        // this means that it's live.
        if (!m_dfgData)
            return true;

        // For simplicity, we don't attempt to jettison code blocks during GC if
        // they are executing. Instead we strongly mark their weak references to
        // allow them to continue to execute soundly.
        if (m_dfgData->mayBeExecuting)
            return true;

        if (Options::forceDFGCodeBlockLiveness())
            return true;

        return false;
    }
#else
    bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
#endif
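
    // Sketch of how the GC visiting logic is expected to use this predicate
    // (hypothetical shape; the real traversal is in the visit methods declared below):
    //
    //     if (shouldImmediatelyAssumeLivenessDuringScan())
    //         stronglyVisitWeakReferences(visitor); // treat weak references as strong
    //     else
    //         performTracingFixpointIteration(visitor); // prove liveness before marking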
    void performTracingFixpointIteration(SlotVisitor&);

    void stronglyVisitStrongReferences(SlotVisitor&);
    void stronglyVisitWeakReferences(SlotVisitor&);

    void createRareDataIfNecessary()
    {
        if (!m_rareData)
            m_rareData = adoptPtr(new RareData);
    }

    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    int m_numParameters;
    WriteBarrier<ScriptExecutable> m_ownerExecutable;

    RefCountedArray<Instruction> m_instructions;
    int m_argumentsRegister;
    int m_activationRegister;

    bool m_isStrictMode;
    bool m_needsActivation;

    RefPtr<SourceProvider> m_source;
    unsigned m_sourceOffset;
    unsigned m_firstLineColumnOffset;
    unsigned m_codeType;

    SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;

    Vector<StructureStubInfo> m_structureStubInfos;
    Vector<ByValInfo> m_byValInfos;
    Vector<CallLinkInfo> m_callLinkInfos;

    MacroAssemblerCodePtr m_jitCodeWithArityCheck;
    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
    struct WeakReferenceTransition {
        WeakReferenceTransition() { }

        WeakReferenceTransition(VM& vm, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
            : m_from(vm, owner, from)
            , m_to(vm, owner, to)
        {
            m_codeOrigin.set(vm, owner, codeOrigin);
        }

        WriteBarrier<JSCell> m_codeOrigin;
        WriteBarrier<JSCell> m_from;
        WriteBarrier<JSCell> m_to;
    };
    struct DFGData {
        DFGData()
            : mayBeExecuting(false)
            , isJettisoned(false)
        {
        }

        Vector<DFG::OSREntryData> osrEntry;
        SegmentedVector<DFG::OSRExit, 8> osrExit;
        Vector<DFG::SpeculationRecovery> speculationRecovery;
        SegmentedVector<JumpReplacementWatchpoint, 1, 0> watchpoints;
        Vector<WeakReferenceTransition> transitions;
        Vector<WriteBarrier<JSCell> > weakReferences;
        DFG::VariableEventStream variableEventStream;
        DFG::MinifiedGraph minifiedDFG;
        RefPtr<Profiler::Compilation> compilation;
        bool mayBeExecuting;
        bool isJettisoned;
        bool livenessHasBeenProved; // Initialized and used on every GC.
        bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
        unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
    };

    OwnPtr<DFGData> m_dfgData;

    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    DFG::ExitProfile m_exitProfile;
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#if ENABLE(VALUE_PROFILER)
    Vector<ValueProfile> m_argumentValueProfiles;
    SegmentedVector<ValueProfile, 8> m_valueProfiles;
    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
    SegmentedVector<ArrayAllocationProfile, 8> m_arrayAllocationProfiles;
    ArrayProfileVector m_arrayProfiles;

    SegmentedVector<ObjectAllocationProfile, 8> m_objectAllocationProfiles;

    Vector<Identifier> m_identifiers;
    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
    // it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown> > m_constantRegisters;
    Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
    Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;

    OwnPtr<CodeBlock> m_alternative;

    ExecutionCounter m_llintExecuteCounter;

    ExecutionCounter m_jitExecuteCounter;
    int32_t m_totalJITExecutions;
    uint32_t m_osrExitCounter;
    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    Vector<ResolveOperations> m_resolveOperations;
    Vector<PutToBaseOperation, 1> m_putToBaseOperations;
    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;

        Vector<HandlerInfo> m_exceptionHandlers;

        // Buffers used for large array literals
        Vector<Vector<JSValue> > m_constantBuffers;

        Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
        Vector<SimpleJumpTable> m_characterSwitchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        EvalCodeCache m_evalCodeCache;

        Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow> m_callReturnIndexVector;

        SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
        Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow> m_codeOrigins;
    };

    friend void WTF::deleteOwnedPtr<RareData>(RareData*);

    OwnPtr<RareData> m_rareData;

    DFG::CapabilityLevel m_canCompileWithDFGState;
};
// Program code is not marked by any function, so we make the global object
// responsible for marking it.

class GlobalCodeBlock : public CodeBlock {
    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
    {
    }
};
class ProgramCodeBlock : public GlobalCodeBlock {
    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, 0, firstLineColumnOffset, alternative)
    {
    }

    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
    virtual void jettisonImpl();
    virtual bool jitCompileImpl(ExecState*);
    virtual CodeBlock* replacement();
    virtual DFG::CapabilityLevel canCompileWithDFGInternal();
};
class EvalCodeBlock : public GlobalCodeBlock {
    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, 0, 1, alternative)
    {
    }

    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }

    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
    virtual void jettisonImpl();
    virtual bool jitCompileImpl(ExecState*);
    virtual CodeBlock* replacement();
    virtual DFG::CapabilityLevel canCompileWithDFGInternal();

    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
};
class FunctionCodeBlock : public CodeBlock {
    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative = nullptr)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
    {
    }

    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
    virtual void jettisonImpl();
    virtual bool jitCompileImpl(ExecState*);
    virtual CodeBlock* replacement();
    virtual DFG::CapabilityLevel canCompileWithDFGInternal();
};
inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
    RELEASE_ASSERT(inlineCallFrame);
    ExecutableBase* executable = inlineCallFrame->executable.get();
    RELEASE_ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}
inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
{
    if (codeOrigin.inlineCallFrame)
        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    return baselineCodeBlock;
}
inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
{
    if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
        return CallFrame::argumentOffset(argument);

    const SlowArgument* slowArguments = symbolTable()->slowArguments();
    if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
        return CallFrame::argumentOffset(argument);

    ASSERT(slowArguments[argument].status == SlowArgument::Captured);
    return slowArguments[argument].index;
}
inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}

inline Register& ExecState::uncheckedR(int index)
{
    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}
inline bool ExecState::isInlineCallFrame()
{
    if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
        return false;
    return isInlineCallFrameSlow();
}
inline JSValue ExecState::argumentAfterCapture(size_t argument)
{
    if (argument >= argumentCount())
        return jsUndefined();

    if (!codeBlock())
        return this[argumentOffset(argument)].jsValue();

    return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
}
inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
{
    // We have to check for 0 and -1 because those are used by the HashMap as markers.
    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);

    // This checks for both of those nasty cases in one go.
    if (value + 1 <= 1)
        return;

    HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
    if (iter == m_set.end())
        return;

    (*iter)->m_dfgData->mayBeExecuting = true;
}

#endif // CodeBlock_h