/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "BytecodeLivenessAnalysis.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
#include "ConcurrentJITLock.h"
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "ProfilerCompilation.h"
#include "ProfilerJettisonReason.h"
#include "PutPropertySlot.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
#include <wtf/FastMalloc.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;
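
// The register holding the original, unmodified arguments object is assumed to live
// in the slot immediately following the arguments register itself, hence the
// offset() + 1 below. (This gloss is inferred from the arithmetic; the authoritative
// layout is defined by the bytecode generator.)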
inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };

class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class BytecodeLivenessAnalysis;
    friend class JIT;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other);

    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);

    WriteBarrier<JSGlobalObject> m_globalObject;
    Heap* m_heap;

public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString hashAsStringIfPossible() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }

    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
    {
        Functor f(std::forward<Functor>(functor));
        Vector<CodeBlock*, 4> codeBlocks;
        codeBlocks.append(this);

        while (!codeBlocks.isEmpty()) {
            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
            f(currentCodeBlock);

            if (CodeBlock* alternative = currentCodeBlock->alternative())
                codeBlocks.append(alternative);
            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
                codeBlocks.append(osrEntryBlock);
        }
    }
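
    // Usage sketch (hypothetical functor, for illustration): this visits the block
    // itself, its alternatives, and any special OSR entry blocks, iteratively rather
    // than recursively.
    //
    //     codeBlock->forEachRelatedCodeBlock([] (CodeBlock* block) {
    //         dataLog("related code block: ", *block, "\n");
    //     });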

    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }

    CodeBlock* baselineAlternative();

    // FIXME: Get rid of this.
    // https://bugs.webkit.org/show_bug.cgi?id=123677
    CodeBlock* baselineVersion();

    void visitAggregate(SlotVisitor&);

    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(
        PrintStream&, unsigned bytecodeOffset,
        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    bool isStrictMode() const { return m_isStrictMode; }
    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister.offset() && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column);

    void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
    void getStubInfoMap(StubInfoMap& result);

    void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
    void getCallLinkInfoMap(CallLinkInfoMap& result);

#if ENABLE(JIT)
    StructureStubInfo* addStubInfo();
    Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
    Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }

    void resetStub(StructureStubInfo&);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo* addCallLinkInfo();
    Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
    Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }

    // This is a slow function call used primarily for compiling OSR exits in the case
    // that there had been inlining. Chances are if you want to use this, you're really
    // looking for a CallLinkInfoMap to amortize the cost of calling this.
    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
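
    // For repeated lookups, build the map once instead (a sketch; the exact key type
    // is defined by CallLinkInfoMap, so treat the CodeOrigin key as an assumption):
    //
    //     CallLinkInfoMap map;
    //     getCallLinkInfoMap(map);
    //     CallLinkInfo* info = map.get(CodeOrigin(bytecodeIndex));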
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void unlinkCalls();

    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);

    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)

    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);

    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }

    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }

    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() const { return m_instructions.size(); }

    int argumentIndexAfterCapture(size_t argument);

    bool hasSlowArguments();
    const SlowArgument* machineSlowArguments();

    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
    void install();

    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    PassRefPtr<CodeBlock> newReplacement();

    void setJITCode(PassRefPtr<JITCode> code)
    {
        ASSERT(m_heap->isDeferred());
        m_heap->reportExtraMemoryCost(code->size());
        ConcurrentJITLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = code;
    }
    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }

#if ENABLE(JIT)
    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
    DFG::CapabilityLevel capabilityLevel();
    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }

    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif

    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization);

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
    VirtualRegister thisRegister() const { return m_thisRegister; }

    bool usesEval() const { return m_unlinkedCode->usesEval(); }

    void setArgumentsRegister(VirtualRegister argumentsRegister)
    {
        ASSERT(argumentsRegister.isValid());
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    VirtualRegister argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    VirtualRegister uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return VirtualRegister();
        return argumentsRegister();
    }
    void setActivationRegister(VirtualRegister activationRegister)
    {
        m_activationRegister = activationRegister;
    }

    VirtualRegister activationRegister() const
    {
        ASSERT(m_activationRegister.isValid());
        return m_activationRegister;
    }

    VirtualRegister uncheckedActivationRegister()
    {
        return m_activationRegister;
    }

    bool usesArguments() const { return m_argumentsRegister.isValid(); }

    bool needsActivation() const
    {
        ASSERT(m_activationRegister.isValid() == m_needsActivation);
        return m_needsActivation;
    }

    unsigned captureCount() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureCount();
    }

    int captureStart() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureStart();
    }

    int captureEnd() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureEnd();
    }

    bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;

    int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
    int framePointerOffsetToGetActivationRegisters();

    CodeType codeType() const { return m_unlinkedCode->codeType(); }
    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void clearEvalCache();

    String nameForRegister(VirtualRegister);

#if ENABLE(JIT)
    void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
#endif

    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
    }
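
    // Example read from a concurrent compiler thread (a sketch; per the rules
    // documented at m_lock below, profile reads off the main thread must hold the
    // lock):
    //
    //     ConcurrentJITLocker locker(profiledBlock->m_lock);
    //     SpeculatedType prediction =
    //         profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeOffset);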

    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }
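
    // The unified index space above places the argument profiles first, then the
    // bytecode value profiles: an index i >= numberOfArgumentValueProfiles() maps to
    // valueProfile(i - numberOfArgumentValueProfiles()).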

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }

    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        return m_jitCode->dfgCommon()->codeOrigins;
    }

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }

    bool canGetCodeOrigin(unsigned index)
    {
        if (!hasCodeOrigins())
            return false;
        return index < codeOrigins().size();
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        return codeOrigins()[index];
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(jitType()));
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.add(locker, site);
    }

    bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
    {
        return m_exitProfile.hasExitSite(locker, site);
    }
    bool hasExitSite(const DFG::FrequentExitSite& site) const
    {
        ConcurrentJITLocker locker(m_lock);
        return hasExitSite(locker, site);
    }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif // ENABLE(DFG_JIT)

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const
    {
        if (!JITCode::isOptimizingJIT(jitType()))
            return 0;

        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
    }

    const Identifier& identifier(int index) const
    {
        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
            return m_unlinkedCode->identifier(index);
        ASSERT(JITCode::isOptimizingJIT(jitType()));
        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
    }
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        return result;
    }

    bool findConstant(JSValue, unsigned& result);
    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }

    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }

    Heap* heap() const { return m_heap; }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    BytecodeLivenessAnalysis& livenessAnalysis()
    {
        {
            ConcurrentJITLocker locker(m_lock);
            if (!!m_livenessAnalysis)
                return *m_livenessAnalysis;
        }
        std::unique_ptr<BytecodeLivenessAnalysis> analysis =
            std::make_unique<BytecodeLivenessAnalysis>(this);
        {
            ConcurrentJITLocker locker(m_lock);
            if (!m_livenessAnalysis)
                m_livenessAnalysis = WTF::move(analysis);
            return *m_livenessAnalysis;
        }
    }

    void validate();

    // Jump Tables

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    SymbolTable* symbolTable() const { return m_symbolTable.get(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const BaselineExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
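
    // How the LLInt consults these counters (a sketch with hypothetical helpers; the
    // real call sites live in the LLInt slow paths):
    //
    //     if (codeBlock->checkIfJITThresholdReached())
    //         compileAndTierUpToBaseline(codeBlock); // hypothetical helper
    //     else
    //         continueInLLInt(codeBlock);            // hypothetical helper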

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.
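
    // In pseudo-C++, the shape of a trigger (illustrative only; the real checks are
    // the functions declared below):
    //
    //     // On a return or a loop back-edge in baseline code:
    //     if (checkIfOptimizationThresholdReached())
    //         compileOptimizedReplacementAndMaybeOSREnter(); // hypothetical helper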

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }

    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);

    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();

    unsigned frameRegisterCount();
    int stackPointerOffset();

    bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);

    bool hasDebuggerRequests() const { return m_debuggerRequests; }
    void* debuggerRequestsAddress() { return &m_debuggerRequests; }

    void addBreakpoint(unsigned numBreakpoints);
    void removeBreakpoint(unsigned numBreakpoints)
    {
        ASSERT(m_numBreakpoints >= numBreakpoints);
        m_numBreakpoints -= numBreakpoints;
    }

    enum SteppingMode {
        SteppingModeDisabled,
        SteppingModeEnabled
    };
    void setSteppingMode(SteppingMode);

    void clearDebuggerRequests()
    {
        m_steppingMode = SteppingModeDisabled;
        m_numBreakpoints = 0;
    }

    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    int m_numVars;
    bool m_isConstructor : 1;

    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJITLock m_lock;
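
    // For example, a concurrent compiler thread snapshots this block's inline
    // caches under the lock like so (a sketch using the accessors declared above):
    //
    //     StubInfoMap stubInfos;
    //     {
    //         ConcurrentJITLocker locker(codeBlock->m_lock);
    //         codeBlock->getStubInfoMap(locker, stubInfos);
    //     }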

    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.

    bool m_didFailFTLCompilation : 1;
    bool m_hasBeenCompiledWithFTL : 1;

    // Internal methods for use by validation code. They would be private if it
    // weren't for the fact that we use them from anonymous namespaces.
    void beginValidationDidFail();
    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();

    bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live.

protected:
    virtual void visitWeakReferences(SlotVisitor&) override;
    virtual void finalizeUnconditionally() override;

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class CodeBlockSet;

    CodeBlock* specialOSREntryBlockOrNull();

    void noticeIncomingCall(ExecState* callerFrame);

    double optimizationThresholdScalingFactor();

#if ENABLE(JIT)
    ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
#endif

    void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);

    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
    {
        size_t count = constants.size();
        m_constantRegisters.resize(count);
        for (size_t i = 0; i < count; i++)
            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
    }

    void dumpBytecode(
        PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());

    CString registerName(int r) const;
    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
    enum CacheDumpMode { DumpCaches, DontDumpCaches };
    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);

    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);

    bool shouldImmediatelyAssumeLivenessDuringScan();

    void propagateTransitions(SlotVisitor&);
    void determineLiveness(SlotVisitor&);

    void stronglyVisitStrongReferences(SlotVisitor&);
    void stronglyVisitWeakReferences(SlotVisitor&);

    void createRareDataIfNecessary()
    {
        if (!m_rareData)
            m_rareData = adoptPtr(new RareData);
    }

#if ENABLE(JIT)
    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
#endif
    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    int m_numParameters;
    union {
        unsigned m_debuggerRequests;
        struct {
            unsigned m_hasDebuggerStatement : 1;
            unsigned m_steppingMode : 1;
            unsigned m_numBreakpoints : 30;
        };
    };
    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    VM* m_vm;

    RefCountedArray<Instruction> m_instructions;
    WriteBarrier<SymbolTable> m_symbolTable;
    VirtualRegister m_thisRegister;
    VirtualRegister m_argumentsRegister;
    VirtualRegister m_activationRegister;

    bool m_isStrictMode;
    bool m_needsActivation;
    bool m_mayBeExecuting;
    uint8_t m_visitAggregateHasBeenCalled;

    RefPtr<SourceProvider> m_source;
    unsigned m_sourceOffset;
    unsigned m_firstLineColumnOffset;
    unsigned m_codeType;

    Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
    RefPtr<JITCode> m_jitCode;
#if ENABLE(JIT)
    Bag<StructureStubInfo> m_stubInfos;
    Vector<ByValInfo> m_byValInfos;
    Bag<CallLinkInfo> m_callLinkInfos;
    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
#endif
    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#if ENABLE(DFG_JIT)
    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    DFG::ExitProfile m_exitProfile;
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
    Vector<ValueProfile> m_argumentValueProfiles;
    Vector<ValueProfile> m_valueProfiles;
    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
    Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
    ArrayProfileVector m_arrayProfiles;
    Vector<ObjectAllocationProfile> m_objectAllocationProfiles;

    // Constant Pool
    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
    // it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown>> m_constantRegisters;
    Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
    Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;

    RefPtr<CodeBlock> m_alternative;

    BaselineExecutionCounter m_llintExecuteCounter;

    BaselineExecutionCounter m_jitExecuteCounter;
    int32_t m_totalJITExecutions;
    uint32_t m_osrExitCounter;
    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    mutable CodeBlockHash m_hash;

    std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;

    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        // Buffers used for large array literals
        Vector<Vector<JSValue>> m_constantBuffers;

        // Jump Tables
        Vector<SimpleJumpTable> m_switchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        EvalCodeCache m_evalCodeCache;
    };
#if COMPILER(MSVC)
    friend void WTF::deleteOwnedPtr<RareData>(RareData*);
#endif
    OwnPtr<RareData> m_rareData;
#if ENABLE(JIT)
    DFG::CapabilityLevel m_capabilityLevelState;
#endif
};

// Program code is not marked by any function, so we make the global object
// responsible for marking it.

class GlobalCodeBlock : public CodeBlock {
protected:
    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
    {
    }
};

class ProgramCodeBlock : public GlobalCodeBlock {
public:
    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
    {
    }

#if ENABLE(JIT)
protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif
};

class EvalCodeBlock : public GlobalCodeBlock {
public:
    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
    {
    }

    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }

#if ENABLE(JIT)
protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif

private:
    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
};

class FunctionCodeBlock : public CodeBlock {
public:
    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
    {
    }

#if ENABLE(JIT)
protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif
};

inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
    RELEASE_ASSERT(inlineCallFrame);
    ExecutableBase* executable = inlineCallFrame->executable.get();
    RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}

inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
{
    if (codeOrigin.inlineCallFrame)
        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    return baselineCodeBlock;
}

inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
{
    if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
        return CallFrame::argumentOffset(argument);

    const SlowArgument* slowArguments = symbolTable()->slowArguments();
    if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
        return CallFrame::argumentOffset(argument);

    ASSERT(slowArguments[argument].status == SlowArgument::Captured);
    return slowArguments[argument].index;
}

inline bool CodeBlock::hasSlowArguments()
{
    return !!symbolTable()->slowArguments();
}

inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}
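
// Constant registers are not stored in the call frame: indices at or above
// FirstConstantRegisterIndex are redirected into the CodeBlock's constant pool, and
// the reinterpret_cast above relies on the COMPILE_ASSERT in CodeBlock that Register
// and WriteBarrier<Unknown> have the same size.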

inline Register& ExecState::uncheckedR(int index)
{
    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}

inline JSValue ExecState::argumentAfterCapture(size_t argument)
{
    if (argument >= argumentCount())
        return jsUndefined();

    if (!codeBlock())
        return this[argumentOffset(argument)].jsValue();

    return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
}

inline void CodeBlockSet::mark(void* candidateCodeBlock)
{
    // We have to check for 0 and -1 because those are used by the HashMap as markers.
    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);

    // This checks for both of those nasty cases in one go.
    // 0 + 1 = 1
    // -1 + 1 = 0
    if (value + 1 <= 1)
        return;

    CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
    if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
        return;

    mark(codeBlock);
}

inline void CodeBlockSet::mark(CodeBlock* codeBlock)
{
    if (!codeBlock)
        return;

    if (codeBlock->m_mayBeExecuting)
        return;

    codeBlock->m_mayBeExecuting = true;
    // We might not have cleared the marks for this CodeBlock, but we need to visit it.
    codeBlock->m_visitAggregateHasBeenCalled = false;
#if ENABLE(GGC)
    m_currentlyExecuting.append(codeBlock);
#endif
}

template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
{
    switch (type()) {
    case ProgramExecutableType: {
        if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
        break;
    }

    case EvalExecutableType: {
        if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
        break;
    }

    case FunctionExecutableType: {
        Functor f(std::forward<Functor>(functor));
        FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
        if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
            codeBlock->forEachRelatedCodeBlock(f);
        if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
            codeBlock->forEachRelatedCodeBlock(f);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

} // namespace JSC

#endif // CodeBlock_h