/*
 * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef CodeBlock_h
#define CodeBlock_h

#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeOrigin.h"
#include "CompactJITCodeMap.h"
#include "DFGCodeBlocks.h"
#include "DFGExitProfile.h"
#include "DFGOSREntry.h"
#include "DFGOSRExit.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "GlobalResolveInfo.h"
#include "HandlerInfo.h"
#include "MethodCallLinkInfo.h"
#include "Instruction.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include <wtf/RefCountedArray.h>
#include <wtf/FastAllocBase.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>

namespace JSC {
class LLIntOffsetsExtractor;

inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable*);

    CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);

    WriteBarrier<JSGlobalObject> m_globalObject;

public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();
    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
    CodeSpecializationKind specializationKind()
    {
        if (m_isConstructor)
            return CodeForConstruct;
        return CodeForCall;
    }

    CodeBlock* baselineVersion()
    {
        CodeBlock* result = replacement();
        if (!result)
            return 0; // This can happen if we're in the process of creating the baseline version.
        while (result->alternative())
            result = result->alternative();
        ASSERT(result);
        ASSERT(JITCode::isBaselineCode(result->getJITType()));
        return result;
    }
    void visitAggregate(SlotVisitor&);

    static void dumpStatistics();

    void dump(ExecState*) const;
    void printStructures(const Instruction*) const;
    void printStructure(const char* name, const Instruction*, int operand) const;

    bool isStrictMode() const { return m_isStrictMode; }
    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }
    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
#if ENABLE(JIT)
    StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
    }

    StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
    }

    CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
    }

    CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
    }

    MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
    }

    MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
    }
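
    // Note: these lookups assume that m_structureStubInfos, m_callLinkInfos and
    // m_methodCallLinkInfos stay sorted by both bytecode index and machine return
    // address (they are appended in code-generation order), so a binary search keyed
    // on either value finds the one matching entry.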
    unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);

    unsigned bytecodeOffsetForCallAtIndex(unsigned index)
    {
        if (!m_rareData)
            return 1;
        Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
        if (!callIndices.size())
            return 1;
        ASSERT(index < m_rareData->m_callReturnIndexVector.size());
        return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
    }
    bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }

    void linkIncomingCall(CallLinkInfo* incoming)
    {
        m_incomingCalls.push(incoming);
    }
#if ENABLE(LLINT)
    void linkIncomingCall(LLIntCallLinkInfo* incoming)
    {
        m_incomingLLIntCalls.push(incoming);
    }
#endif // ENABLE(LLINT)

    void unlinkIncomingCalls();
#endif // ENABLE(JIT)
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }
#endif
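
    // The CompactJITCodeMap is a compact mapping from bytecode offsets to machine code
    // offsets in this block's generated code; OSR entry uses it to find the machine
    // code location that corresponds to a given bytecode offset.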
#if ENABLE(DFG_JIT)
    void createDFGDataIfNecessary()
    {
        if (m_dfgData)
            return;

        m_dfgData = adoptPtr(new DFGData);
    }

    DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
    {
        createDFGDataIfNecessary();
        DFG::OSREntryData entry;
        entry.m_bytecodeIndex = bytecodeIndex;
        entry.m_machineCodeOffset = machineCodeOffset;
        m_dfgData->osrEntry.append(entry);
        return &m_dfgData->osrEntry.last();
    }
    unsigned numberOfDFGOSREntries() const
    {
        if (!m_dfgData)
            return 0;
        return m_dfgData->osrEntry.size();
    }
    DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
    DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
    {
        return binarySearch<DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(), bytecodeIndex);
    }
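
    // dfgOSREntryDataForBytecodeIndex() binary-searches m_dfgData->osrEntry, so the DFG
    // is expected to append entries in ascending bytecode-index order.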
    void appendOSRExit(const DFG::OSRExit& osrExit)
    {
        createDFGDataIfNecessary();
        m_dfgData->osrExit.append(osrExit);
    }

    DFG::OSRExit& lastOSRExit()
    {
        return m_dfgData->osrExit.last();
    }

    void appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
    {
        createDFGDataIfNecessary();
        m_dfgData->speculationRecovery.append(recovery);
    }

    unsigned numberOfOSRExits()
    {
        if (!m_dfgData)
            return 0;
        return m_dfgData->osrExit.size();
    }

    unsigned numberOfSpeculationRecoveries()
    {
        if (!m_dfgData)
            return 0;
        return m_dfgData->speculationRecovery.size();
    }

    DFG::OSRExit& osrExit(unsigned index)
    {
        return m_dfgData->osrExit[index];
    }

    DFG::SpeculationRecovery& speculationRecovery(unsigned index)
    {
        return m_dfgData->speculationRecovery[index];
    }

    void appendWeakReference(JSCell* target)
    {
        createDFGDataIfNecessary();
        m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
    }

    void shrinkWeakReferencesToFit()
    {
        if (!m_dfgData)
            return;
        m_dfgData->weakReferences.shrinkToFit();
    }

    void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
    {
        createDFGDataIfNecessary();
        m_dfgData->transitions.append(
            WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
    }

    void shrinkWeakReferenceTransitionsToFit()
    {
        if (!m_dfgData)
            return;
        m_dfgData->transitions.shrinkToFit();
    }
#endif // ENABLE(DFG_JIT)
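
    // The weak references and weak reference transitions registered above record the
    // cells and structure transitions the DFG compiled against. During GC the weak
    // reference harvesting and unconditional finalization passes consult them; if a
    // referenced cell has died, the optimized code is no longer safe to run and the
    // block gets jettisoned.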
    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }

    void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
    bool isNumericCompareFunction() { return m_isNumericCompareFunction; }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() { return m_instructions.size(); }
#if ENABLE(JIT)
    void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
    {
        m_jitCode = code;
        m_jitCodeWithArityCheck = codeWithArityCheck;
#if ENABLE(DFG_JIT)
        if (m_jitCode.jitType() == JITCode::DFGJIT) {
            createDFGDataIfNecessary();
            m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
        }
#endif
    }
    JITCode& getJITCode() { return m_jitCode; }
    MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
    JITCode::JITType getJITType() { return m_jitCode.jitType(); }
    ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
    virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
    virtual void jettison() = 0;
    enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
    JITCompilationResult jitCompile(JSGlobalData& globalData)
    {
        if (getJITType() != JITCode::InterpreterThunk) {
            ASSERT(getJITType() == JITCode::BaselineJIT);
            return AlreadyCompiled;
        }
#if ENABLE(JIT)
        if (jitCompileImpl(globalData))
            return CompiledSuccessfully;
        return CouldNotCompile;
#else
        UNUSED_PARAM(globalData);
        return CouldNotCompile;
#endif
    }
    virtual CodeBlock* replacement() = 0;

    enum CompileWithDFGState {
        CompileWithDFGFalse,
        CompileWithDFGTrue,
        CompileWithDFGUnset
    };

    virtual bool canCompileWithDFGInternal() = 0;
    bool canCompileWithDFG()
    {
        bool result = canCompileWithDFGInternal();
        m_canCompileWithDFGState = result ? CompileWithDFGTrue : CompileWithDFGFalse;
        return result;
    }
    CompileWithDFGState canCompileWithDFGState() { return m_canCompileWithDFGState; }

    bool hasOptimizedReplacement()
    {
        ASSERT(JITCode::isBaselineCode(getJITType()));
        bool result = replacement()->getJITType() > getJITType();
#if !ASSERT_DISABLED
        if (result)
            ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
        else {
            ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
            ASSERT(replacement() == this);
        }
#endif
        return result;
    }
#else
    JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
#endif
    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
    JSGlobalData* globalData() { return m_globalData; }

    void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
    int thisRegister() const { return m_thisRegister; }

    void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
    bool needsFullScopeChain() const { return m_needsFullScopeChain; }
    void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
    bool usesEval() const { return m_usesEval; }
    void setArgumentsRegister(int argumentsRegister)
    {
        ASSERT(argumentsRegister != -1);
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    int argumentsRegister()
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    void setActivationRegister(int activationRegister)
    {
        m_activationRegister = activationRegister;
    }
    int activationRegister()
    {
        ASSERT(needsFullScopeChain());
        return m_activationRegister;
    }
    bool usesArguments() const { return m_argumentsRegister != -1; }
    CodeType codeType() const { return m_codeType; }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }

    size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
    void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
    unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
    unsigned lastJumpTarget() const { return m_jumpTargets.last(); }

    void createActivation(CallFrame*);

    void clearEvalCache();
    void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
    {
        m_propertyAccessInstructions.append(propertyAccessInstruction);
    }
    void addGlobalResolveInstruction(unsigned globalResolveInstruction)
    {
        m_globalResolveInstructions.append(globalResolveInstruction);
    }
    bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
#if ENABLE(LLINT)
    LLIntCallLinkInfo* addLLIntCallLinkInfo()
    {
        m_llintCallLinkInfos.append(LLIntCallLinkInfo());
        return &m_llintCallLinkInfos.last();
    }
#endif
#if ENABLE(JIT)
    void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
    size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
    StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }

    void addGlobalResolveInfo(unsigned globalResolveInstruction)
    {
        m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
    }
    GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
    bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);

    void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
    size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
    CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }

    void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
    MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
    size_t numberOfMethodCallLinkInfos() { return m_methodCallLinkInfos.size(); }
#endif
#if ENABLE(VALUE_PROFILER)
    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    ValueProfile* addValueProfile(int bytecodeOffset)
    {
        ASSERT(bytecodeOffset != -1);
        ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset);
        m_valueProfiles.append(ValueProfile(bytecodeOffset));
        return &m_valueProfiles.last();
    }
    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index)
    {
        ValueProfile* result = &m_valueProfiles[index];
        ASSERT(result->m_bytecodeOffset != -1);
        return result;
    }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
                   m_globalData->interpreter->getOpcodeID(
                       instructions()[
                           bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    PredictedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
    }

    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }
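
    // getFromAllValueProfiles() exposes a single flat index space: indices in
    // [0, numberOfArgumentValueProfiles()) map to argument profiles and the rest map to
    // bytecode value profiles, which is convenient for iterating every profile at once.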
    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
    }
    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
    }
    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
    }

    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
    }
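
    // Illustrative numbers only (the real values live in Options): with
    // likelyToTakeSlowCaseMinimumCount = 100 and likelyToTakeSlowCaseThreshold = 0.05,
    // an op that took its slow path 120 times across 1000 recorded entries has a ratio
    // of 0.12, both conditions hold, and likelyToTakeSlowCase() returns true.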
    unsigned executionEntryCount() const { return m_executionEntryCount; }
#endif

    unsigned globalResolveInfoCount() const
    {
#if ENABLE(JIT)
        if (m_globalData->canUseJIT())
            return m_globalResolveInfos.size();
#endif
        return 0;
    }
    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
    HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
    void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
    {
        createRareDataIfNecessary();
        m_rareData->m_expressionInfo.append(expressionInfo);
    }

    void addLineInfo(unsigned bytecodeOffset, int lineNo)
    {
        createRareDataIfNecessary();
        Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
        if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
            LineInfo info = { bytecodeOffset, lineNo };
            lineInfo.append(info);
        }
    }

    bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
    bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
    // We only generate exception handling info if the user is debugging
    // (and may want line number info), or if the function contains an exception handler.
    bool needsCallReturnIndices()
    {
        return m_rareData &&
            (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
    }

#if ENABLE(JIT)
    Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
    {
        createRareDataIfNecessary();
        return m_rareData->m_callReturnIndexVector;
    }
#endif
#if ENABLE(DFG_JIT)
    SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
    {
        createRareDataIfNecessary();
        return m_rareData->m_inlineCallFrames;
    }

    Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
    {
        createRareDataIfNecessary();
        return m_rareData->m_codeOrigins;
    }

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return m_rareData && !!m_rareData->m_codeOrigins.size();
    }

    bool codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
    {
        if (!hasCodeOrigins())
            return false;
        unsigned offset = getJITCode().offsetOf(returnAddress.value());
        CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
        if (entry->callReturnOffset != offset)
            return false;
        codeOrigin = entry->codeOrigin;
        return true;
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_codeOrigins[index].codeOrigin;
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(getJITType()));
        return m_exitProfile.add(site);
    }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif
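
    // The exit profile lives on the baseline CodeBlock: each DFG OSR exit records its
    // site here so that the next DFG compilation of this code can avoid repeating the
    // same failed speculation.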
    size_t numberOfIdentifiers() const { return m_identifiers.size(); }
    void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
    Identifier& identifier(int index) { return m_identifiers[index]; }
    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
        return result;
    }
    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
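
    // Constant "registers" share the operand index space with locals: any operand at or
    // above FirstConstantRegisterIndex denotes a constant, so constantRegister() and
    // getConstant() subtract that base to index into m_constantRegisters.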
    unsigned addFunctionDecl(FunctionExecutable* n)
    {
        unsigned size = m_functionDecls.size();
        m_functionDecls.append(WriteBarrier<FunctionExecutable>());
        m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
        return size;
    }
    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    unsigned addFunctionExpr(FunctionExecutable* n)
    {
        unsigned size = m_functionExprs.size();
        m_functionExprs.append(WriteBarrier<FunctionExecutable>());
        m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
        return size;
    }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
    unsigned addRegExp(RegExp* r)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_regexps.size();
        m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
        return size;
    }
    unsigned numberOfRegExps() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_regexps.size();
    }
    RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
    unsigned addConstantBuffer(unsigned length)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
        return size;
    }

    JSValue* constantBuffer(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index].data();
    }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return globalObject();
        // FIXME: if we ever inline based on executable not function, this code will need to change.
        return codeOrigin.inlineCallFrame->callee->scope()->globalObject.get();
    }
    size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
    SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
    SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }

    size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
    SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
    SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    SymbolTable* symbolTable() { return m_symbolTable; }
    SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
    void copyPostParseDataFrom(CodeBlock* alternative);
    void copyPostParseDataFromAlternative();

    // Functions for controlling when JITting kicks in, in a mixed mode
    // interpreter.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp, this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon, this);
    }

    int32_t llintExecuteCounter() const
    {
        return m_llintExecuteCounter.m_counter;
    }
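
    // Sketch of the LLInt tier-up path, assuming a mixed LLInt/JIT build: the LLInt
    // bumps m_llintExecuteCounter on entries and loop back-edges, and once
    // checkIfJITThresholdReached() reports that the threshold was crossed, the caller
    // compiles this block with the baseline JIT and switches execution over to it.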
    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    unsigned reoptimizationRetryCounter() const
    {
        ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
        return m_reoptimizationRetryCounter;
    }
    void countReoptimization()
    {
        m_reoptimizationRetryCounter++;
        if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
            m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
    }

    int32_t counterValueForOptimizeAfterWarmUp()
    {
        return Options::thresholdForOptimizeAfterWarmUp << reoptimizationRetryCounter();
    }

    int32_t counterValueForOptimizeAfterLongWarmUp()
    {
        return Options::thresholdForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
    }

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

    int32_t jitExecuteCounter() const { return m_jitExecuteCounter.m_counter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
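
    // The retry counter makes re-optimization exponentially more reluctant: each retry
    // left-shifts the base threshold, so (for example) a base of 1000 becomes 2000
    // after one failed optimization and 4000 after two. The actual base values are
    // configured in Options.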
    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached()
    {
        return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation()
    {
        m_jitExecuteCounter.setNewThreshold(0, this);
    }

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon()
    {
        m_jitExecuteCounter.deferIndefinitely();
    }

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp()
    {
        m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
    }

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp()
    {
        m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
    }

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon()
    {
        m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this);
    }
    // The speculative JIT tracks its success rate, so that we can
    // decide when to reoptimize. It's interesting to note that these
    // counters may overflow without any protection. The success
    // counter will overflow before the fail one does, because the
    // fail one is used as a trigger to reoptimize. So the worst case
    // is that the success counter overflows and we reoptimize without
    // needing to. But this is harmless. If a method really did
    // execute 2^32 times then compiling it again probably won't hurt
    // anyone.

    void countSpeculationSuccess()
    {
        m_speculativeSuccessCounter++;
    }

    void countSpeculationFailure()
    {
        m_speculativeFailCounter++;
    }

    uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
    uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
    uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; }

    uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
    uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
    uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; }

    static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
    static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
    static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); }
    // The number of failures that triggers the use of the ratio.
    unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
    unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }

    bool shouldReoptimizeNow()
    {
        return (Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter()
                && speculativeFailCounter() >= largeFailCountThreshold())
            || forcedOSRExitCounter() >= Options::forcedOSRExitCountForReoptimization;
    }

    bool shouldReoptimizeFromLoopNow()
    {
        return (Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter()
                && speculativeFailCounter() >= largeFailCountThresholdForLoop())
            || forcedOSRExitCounter() >= Options::forcedOSRExitCountForReoptimization;
    }
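
    // In other words, reoptimization needs both a bad success/fail ratio and an
    // absolute fail count above largeFailCountThreshold(), so a handful of early exits
    // cannot jettison a mostly-successful block; a large number of forced OSR exits
    // bypasses the ratio test entirely.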
#if ENABLE(VALUE_PROFILER)
    bool shouldOptimizeNow();
#else
    bool shouldOptimizeNow() { return false; }
#endif

    void reoptimize()
    {
        ASSERT(replacement() != this);
        ASSERT(replacement()->alternative() == this);
        replacement()->tallyFrequentExitSites();
        replacement()->jettison();
        countReoptimization();
        optimizeAfterWarmUp();
    }

#if ENABLE(VERBOSE_VALUE_PROFILE)
    void dumpValueProfiles();
#endif
    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    int m_numVars;
    int m_numCapturedVars;
    bool m_isConstructor;
protected:
#if ENABLE(JIT)
    virtual bool jitCompileImpl(JSGlobalData&) = 0;
#endif
    virtual void visitWeakReferences(SlotVisitor&);
    virtual void finalizeUnconditionally();

private:
    friend class DFGCodeBlocks;

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif
    void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;

    CString registerName(ExecState*, int r) const;
    void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
    void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
    void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op) const;
    void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
    void printCallOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
    void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
    void visitStructures(SlotVisitor&, Instruction* vPC) const;
#if ENABLE(DFG_JIT)
    bool shouldImmediatelyAssumeLivenessDuringScan()
    {
        // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
        // CodeBlocks don't need to be jettisoned when their weak references go
        // stale. So if a baseline JIT CodeBlock gets scanned, we can assume that
        // this means that it's live.
        if (!m_dfgData)
            return true;

        // For simplicity, we don't attempt to jettison code blocks during GC if
        // they are executing. Instead we strongly mark their weak references to
        // allow them to continue to execute soundly.
        if (m_dfgData->mayBeExecuting)
            return true;

        return false;
    }
#else
    bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
#endif
    void performTracingFixpointIteration(SlotVisitor&);

    void stronglyVisitStrongReferences(SlotVisitor&);
    void stronglyVisitWeakReferences(SlotVisitor&);

    void createRareDataIfNecessary()
    {
        if (!m_rareData)
            m_rareData = adoptPtr(new RareData);
    }
    int m_numParameters;

    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    JSGlobalData* m_globalData;

    RefCountedArray<Instruction> m_instructions;

    int m_thisRegister;
    int m_argumentsRegister;
    int m_activationRegister;

    bool m_needsFullScopeChain;
    bool m_usesEval;
    bool m_isNumericCompareFunction;
    bool m_isStrictMode;

    CodeType m_codeType;

    RefPtr<SourceProvider> m_source;
    unsigned m_sourceOffset;

    Vector<unsigned> m_propertyAccessInstructions;
    Vector<unsigned> m_globalResolveInstructions;
#if ENABLE(LLINT)
    SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
#endif
#if ENABLE(JIT)
    Vector<StructureStubInfo> m_structureStubInfos;
    Vector<GlobalResolveInfo> m_globalResolveInfos;
    Vector<CallLinkInfo> m_callLinkInfos;
    Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
    JITCode m_jitCode;
    MacroAssemblerCodePtr m_jitCodeWithArityCheck;
    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
#endif
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#endif
#if ENABLE(DFG_JIT)
    struct WeakReferenceTransition {
        WeakReferenceTransition() { }

        WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
            : m_from(globalData, owner, from)
            , m_to(globalData, owner, to)
        {
            if (!!codeOrigin)
                m_codeOrigin.set(globalData, owner, codeOrigin);
        }

        WriteBarrier<JSCell> m_codeOrigin;
        WriteBarrier<JSCell> m_from;
        WriteBarrier<JSCell> m_to;
    };

    struct DFGData {
        DFGData()
            : mayBeExecuting(false)
            , isJettisoned(false)
        {
        }

        Vector<DFG::OSREntryData> osrEntry;
        SegmentedVector<DFG::OSRExit, 8> osrExit;
        Vector<DFG::SpeculationRecovery> speculationRecovery;
        Vector<WeakReferenceTransition> transitions;
        Vector<WriteBarrier<JSCell> > weakReferences;
        bool mayBeExecuting;
        bool isJettisoned;
        bool livenessHasBeenProved; // Initialized and used on every GC.
        bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
        unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
    };

    OwnPtr<DFGData> m_dfgData;
    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    DFG::ExitProfile m_exitProfile;
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif

#if ENABLE(VALUE_PROFILER)
    Vector<ValueProfile> m_argumentValueProfiles;
    SegmentedVector<ValueProfile, 8> m_valueProfiles;
    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
    unsigned m_executionEntryCount;
#endif
    Vector<unsigned> m_jumpTargets;
    Vector<unsigned> m_loopTargets;

    Vector<Identifier> m_identifiers;
    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    Vector<WriteBarrier<Unknown> > m_constantRegisters;
    Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
    Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;

    SymbolTable* m_symbolTable;

    OwnPtr<CodeBlock> m_alternative;

    ExecutionCounter m_llintExecuteCounter;

    ExecutionCounter m_jitExecuteCounter;
    int32_t m_totalJITExecutions;
    uint32_t m_speculativeSuccessCounter;
    uint32_t m_speculativeFailCounter;
    uint32_t m_forcedOSRExitCounter;
    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;
    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        Vector<WriteBarrier<RegExp> > m_regexps;

        // Buffers used for large array literals
        Vector<Vector<JSValue> > m_constantBuffers;

        Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
        Vector<SimpleJumpTable> m_characterSwitchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        EvalCodeCache m_evalCodeCache;

        // Expression info - present if debugging.
        Vector<ExpressionRangeInfo> m_expressionInfo;
        // Line info - present if profiling or debugging.
        Vector<LineInfo> m_lineInfo;
#if ENABLE(JIT)
        Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
#endif
#if ENABLE(DFG_JIT)
        SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
        Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
#endif
    };

    friend void WTF::deleteOwnedPtr<RareData>(RareData*);

    OwnPtr<RareData> m_rareData;

    CompileWithDFGState m_canCompileWithDFGState;
};
// Program code is not marked by any function, so we make the global object
// responsible for marking it.

class GlobalCodeBlock : public CodeBlock {
protected:
    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other, &m_unsharedSymbolTable)
        , m_unsharedSymbolTable(other.m_unsharedSymbolTable)
    {
    }

    GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
        : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative)
    {
    }

private:
    SymbolTable m_unsharedSymbolTable;
};
class ProgramCodeBlock : public GlobalCodeBlock {
public:
    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
        : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
    {
    }

#if ENABLE(JIT)
protected:
    virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
    virtual void jettison();
    virtual bool jitCompileImpl(JSGlobalData&);
    virtual CodeBlock* replacement();
    virtual bool canCompileWithDFGInternal();
#endif
};
class EvalCodeBlock : public GlobalCodeBlock {
public:
    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
        , m_baseScopeDepth(other.m_baseScopeDepth)
        , m_variables(other.m_variables)
    {
    }

    EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
        : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
        , m_baseScopeDepth(baseScopeDepth)
    {
    }

    int baseScopeDepth() const { return m_baseScopeDepth; }

    const Identifier& variable(unsigned index) { return m_variables[index]; }
    unsigned numVariables() { return m_variables.size(); }
    void adoptVariables(Vector<Identifier>& variables)
    {
        ASSERT(m_variables.isEmpty());
        m_variables.swap(variables);
    }

#if ENABLE(JIT)
protected:
    virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
    virtual void jettison();
    virtual bool jitCompileImpl(JSGlobalData&);
    virtual CodeBlock* replacement();
    virtual bool canCompileWithDFGInternal();
#endif

private:
    int m_baseScopeDepth;
    Vector<Identifier> m_variables;
};
class FunctionCodeBlock : public CodeBlock {
public:
    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other, other.sharedSymbolTable())
    {
        // The fact that we have to do this is yucky, but is necessary because of the
        // class hierarchy issues described in the comment block for the main
        // constructor, below.
        sharedSymbolTable()->ref();
    }

    // Rather than using the usual RefCounted::create idiom for SharedSymbolTable we just use new
    // as we need to initialise the CodeBlock before we could initialise any RefPtr to hold the shared
    // symbol table, so we just pass as a raw pointer with a ref count of 1. We then manually deref
    // in the destructor.
    FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr)
        : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative)
    {
    }

    ~FunctionCodeBlock()
    {
        sharedSymbolTable()->deref();
    }

#if ENABLE(JIT)
protected:
    virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
    virtual void jettison();
    virtual bool jitCompileImpl(JSGlobalData&);
    virtual CodeBlock* replacement();
    virtual bool canCompileWithDFGInternal();
#endif
};
inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
    ASSERT(inlineCallFrame);
    ExecutableBase* executable = inlineCallFrame->executable.get();
    ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}

inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
{
    if (codeOrigin.inlineCallFrame)
        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    return baselineCodeBlock;
}
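
// Given a code origin produced by the DFG, these helpers walk back to the baseline
// CodeBlock of the (possibly inlined) function, which is where its value profiles and
// exit profile live.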
inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}

inline Register& ExecState::uncheckedR(int index)
{
    ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}

#if ENABLE(DFG_JIT)
inline bool ExecState::isInlineCallFrame()
{
    if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
        return false;
    return isInlineCallFrameSlow();
}
#endif
#if ENABLE(DFG_JIT)
inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
{
    // We have to check for 0 and -1 because those are used by the HashMap as markers.
    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);

    // This checks for both of those nasty cases in one go.
    if (value + 1 <= 1)
        return;

    HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
    if (iter == m_set.end())
        return;

    (*iter)->m_dfgData->mayBeExecuting = true;
}
#endif

} // namespace JSC

#endif // CodeBlock_h
1460 #endif // CodeBlock_h