/*
 * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
#include "DFGCodeBlocks.h"
#include "DFGExitProfile.h"
#include "DFGOSREntry.h"
#include "DFGOSRExit.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "GlobalResolveInfo.h"
#include "HandlerInfo.h"
#include "MethodCallLinkInfo.h"
#include "Options.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
#include "Nodes.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UString.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include <wtf/RefCountedArray.h>
#include <wtf/FastAllocBase.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>

namespace JSC {

    class DFGCodeBlocks;
    class ExecState;
    class LLIntOffsetsExtractor;

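    // Descriptive note: the register just below the current arguments register is reserved
    // for the original, unmodified arguments object (used if the 'arguments' variable is
    // later reassigned); this helper maps one register number to the other.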
    inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }

    static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

    class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
        WTF_MAKE_FAST_ALLOCATED;
        friend class JIT;
        friend class LLIntOffsetsExtractor;
    public:
        enum CopyParsedBlockTag { CopyParsedBlock };
    protected:
        CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable*);

        CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);

        WriteBarrier<JSGlobalObject> m_globalObject;
        Heap* m_heap;

    public:
        JS_EXPORT_PRIVATE virtual ~CodeBlock();

        int numParameters() const { return m_numParameters; }
        void setNumParameters(int newValue);
        void addParameter();

        int* addressOfNumParameters() { return &m_numParameters; }
        static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

        CodeBlock* alternative() { return m_alternative.get(); }
        PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
        void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }

        CodeSpecializationKind specializationKind()
        {
            if (m_isConstructor)
                return CodeForConstruct;
            return CodeForCall;
        }

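        // alternative() points at the lower-tier CodeBlock that this one replaced, so walking
        // the chain from replacement() always ends at the baseline JIT version.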
#if ENABLE(JIT)
        CodeBlock* baselineVersion()
        {
            CodeBlock* result = replacement();
            if (!result)
                return 0; // This can happen if we're in the process of creating the baseline version.
            while (result->alternative())
                result = result->alternative();
            ASSERT(result);
            ASSERT(JITCode::isBaselineCode(result->getJITType()));
            return result;
        }
#endif

        void visitAggregate(SlotVisitor&);

        static void dumpStatistics();

        void dump(ExecState*) const;
        void printStructures(const Instruction*) const;
        void printStructure(const char* name, const Instruction*, int operand) const;

        bool isStrictMode() const { return m_isStrictMode; }

        inline bool isKnownNotImmediate(int index)
        {
            if (index == m_thisRegister && !m_isStrictMode)
                return true;

            if (isConstantRegisterIndex(index))
                return getConstant(index).isCell();

            return false;
        }

        ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
        {
            return index >= m_numVars;
        }

        HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
        int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
        void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);

#if ENABLE(JIT)

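        // Inline cache and call-linking metadata is kept sorted, so the lookups below can
        // binary-search it either by native return address or by bytecode index.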
        StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
        {
            return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
        }

        StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
        {
            return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
        }

        CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
        {
            return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
        }

        CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
        {
            return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
        }

        MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
        {
            return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
        }

        MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex)
        {
            return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
        }

        unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);

        unsigned bytecodeOffsetForCallAtIndex(unsigned index)
        {
            if (!m_rareData)
                return 1;
            Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
            if (!callIndices.size())
                return 1;
            ASSERT(index < m_rareData->m_callReturnIndexVector.size());
            return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
        }

        void unlinkCalls();

        bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }

        void linkIncomingCall(CallLinkInfo* incoming)
        {
            m_incomingCalls.push(incoming);
        }
#if ENABLE(LLINT)
        void linkIncomingCall(LLIntCallLinkInfo* incoming)
        {
            m_incomingLLIntCalls.push(incoming);
        }
#endif // ENABLE(LLINT)

        void unlinkIncomingCalls();
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
        void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
        {
            m_jitCodeMap = jitCodeMap;
        }
        CompactJITCodeMap* jitCodeMap()
        {
            return m_jitCodeMap.get();
        }
#endif

#if ENABLE(DFG_JIT)
        void createDFGDataIfNecessary()
        {
            if (!!m_dfgData)
                return;

            m_dfgData = adoptPtr(new DFGData);
        }

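        // Each OSR entry record pairs a bytecode index with the machine code offset that the
        // lower tiers can jump to when they decide to enter this DFG block mid-execution
        // (on-stack replacement).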
        DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
        {
            createDFGDataIfNecessary();
            DFG::OSREntryData entry;
            entry.m_bytecodeIndex = bytecodeIndex;
            entry.m_machineCodeOffset = machineCodeOffset;
            m_dfgData->osrEntry.append(entry);
            return &m_dfgData->osrEntry.last();
        }
        unsigned numberOfDFGOSREntries() const
        {
            if (!m_dfgData)
                return 0;
            return m_dfgData->osrEntry.size();
        }
        DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
        DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
        {
            return binarySearch<DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(), bytecodeIndex);
        }

        void appendOSRExit(const DFG::OSRExit& osrExit)
        {
            createDFGDataIfNecessary();
            m_dfgData->osrExit.append(osrExit);
        }

        DFG::OSRExit& lastOSRExit()
        {
            return m_dfgData->osrExit.last();
        }

        void appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
        {
            createDFGDataIfNecessary();
            m_dfgData->speculationRecovery.append(recovery);
        }

        unsigned numberOfOSRExits()
        {
            if (!m_dfgData)
                return 0;
            return m_dfgData->osrExit.size();
        }

        unsigned numberOfSpeculationRecoveries()
        {
            if (!m_dfgData)
                return 0;
            return m_dfgData->speculationRecovery.size();
        }

        DFG::OSRExit& osrExit(unsigned index)
        {
            return m_dfgData->osrExit[index];
        }

        DFG::SpeculationRecovery& speculationRecovery(unsigned index)
        {
            return m_dfgData->speculationRecovery[index];
        }

        void appendWeakReference(JSCell* target)
        {
            createDFGDataIfNecessary();
            m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
        }

        void shrinkWeakReferencesToFit()
        {
            if (!m_dfgData)
                return;
            m_dfgData->weakReferences.shrinkToFit();
        }

        void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
        {
            createDFGDataIfNecessary();
            m_dfgData->transitions.append(
                WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
        }

        void shrinkWeakReferenceTransitionsToFit()
        {
            if (!m_dfgData)
                return;
            m_dfgData->transitions.shrinkToFit();
        }
#endif

        unsigned bytecodeOffset(Instruction* returnAddress)
        {
            ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
            return static_cast<Instruction*>(returnAddress) - instructions().begin();
        }

        void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
        bool isNumericCompareFunction() { return m_isNumericCompareFunction; }

        unsigned numberOfInstructions() const { return m_instructions.size(); }
        RefCountedArray<Instruction>& instructions() { return m_instructions; }
        const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

        size_t predictedMachineCodeSize();

        bool usesOpcode(OpcodeID);

        unsigned instructionCount() { return m_instructions.size(); }

#if ENABLE(JIT)
        void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
        {
            m_jitCode = code;
            m_jitCodeWithArityCheck = codeWithArityCheck;
#if ENABLE(DFG_JIT)
            if (m_jitCode.jitType() == JITCode::DFGJIT) {
                createDFGDataIfNecessary();
                m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
            }
#endif
        }
        JITCode& getJITCode() { return m_jitCode; }
        MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
        JITCode::JITType getJITType() { return m_jitCode.jitType(); }
        ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
        virtual void jettison() = 0;
        enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
        JITCompilationResult jitCompile(JSGlobalData& globalData)
        {
            if (getJITType() != JITCode::InterpreterThunk) {
                ASSERT(getJITType() == JITCode::BaselineJIT);
                return AlreadyCompiled;
            }
#if ENABLE(JIT)
            if (jitCompileImpl(globalData))
                return CompiledSuccessfully;
            return CouldNotCompile;
#else
            UNUSED_PARAM(globalData);
            return CouldNotCompile;
#endif
        }
        virtual CodeBlock* replacement() = 0;

        enum CompileWithDFGState {
            CompileWithDFGFalse,
            CompileWithDFGTrue,
            CompileWithDFGUnset
        };

        virtual bool canCompileWithDFGInternal() = 0;
        bool canCompileWithDFG()
        {
            bool result = canCompileWithDFGInternal();
            m_canCompileWithDFGState = result ? CompileWithDFGTrue : CompileWithDFGFalse;
            return result;
        }
        CompileWithDFGState canCompileWithDFGState() { return m_canCompileWithDFGState; }

        bool hasOptimizedReplacement()
        {
            ASSERT(JITCode::isBaselineCode(getJITType()));
            bool result = replacement()->getJITType() > getJITType();
#if !ASSERT_DISABLED
            if (result)
                ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
            else {
                ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
                ASSERT(replacement() == this);
            }
#endif
            return result;
        }
#else
        JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
#endif

        ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

        void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
        JSGlobalData* globalData() { return m_globalData; }

        void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
        int thisRegister() const { return m_thisRegister; }

        void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
        bool needsFullScopeChain() const { return m_needsFullScopeChain; }
        void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
        bool usesEval() const { return m_usesEval; }

        void setArgumentsRegister(int argumentsRegister)
        {
            ASSERT(argumentsRegister != -1);
            m_argumentsRegister = argumentsRegister;
            ASSERT(usesArguments());
        }
        int argumentsRegister()
        {
            ASSERT(usesArguments());
            return m_argumentsRegister;
        }
        void setActivationRegister(int activationRegister)
        {
            m_activationRegister = activationRegister;
        }
        int activationRegister()
        {
            ASSERT(needsFullScopeChain());
            return m_activationRegister;
        }
        bool usesArguments() const { return m_argumentsRegister != -1; }

        CodeType codeType() const { return m_codeType; }

        SourceProvider* source() const { return m_source.get(); }
        unsigned sourceOffset() const { return m_sourceOffset; }

        size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
        void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
        unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
        unsigned lastJumpTarget() const { return m_jumpTargets.last(); }

        void createActivation(CallFrame*);

        void clearEvalCache();

        void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
        {
            m_propertyAccessInstructions.append(propertyAccessInstruction);
        }
        void addGlobalResolveInstruction(unsigned globalResolveInstruction)
        {
            m_globalResolveInstructions.append(globalResolveInstruction);
        }
        bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
#if ENABLE(LLINT)
        LLIntCallLinkInfo* addLLIntCallLinkInfo()
        {
            m_llintCallLinkInfos.append(LLIntCallLinkInfo());
            return &m_llintCallLinkInfos.last();
        }
#endif
#if ENABLE(JIT)
        void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
        size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
        StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }

        void addGlobalResolveInfo(unsigned globalResolveInstruction)
        {
            m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
        }
        GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
        bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);

        void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
        size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
        CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }

        void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
        MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
        size_t numberOfMethodCallLinkInfos() { return m_methodCallLinkInfos.size(); }
#endif

#if ENABLE(VALUE_PROFILER)
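        // Value profiles record the kinds of values seen at each profiling site. Argument
        // profiles use the sentinel bytecode offset -1; per-bytecode profiles are kept sorted
        // by offset so they can be binary-searched.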
        unsigned numberOfArgumentValueProfiles()
        {
            ASSERT(m_numParameters >= 0);
            ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
            return m_argumentValueProfiles.size();
        }
        ValueProfile* valueProfileForArgument(unsigned argumentIndex)
        {
            ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
            ASSERT(result->m_bytecodeOffset == -1);
            return result;
        }

        ValueProfile* addValueProfile(int bytecodeOffset)
        {
            ASSERT(bytecodeOffset != -1);
            ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset);
            m_valueProfiles.append(ValueProfile(bytecodeOffset));
            return &m_valueProfiles.last();
        }
        unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
        ValueProfile* valueProfile(int index)
        {
            ValueProfile* result = &m_valueProfiles[index];
            ASSERT(result->m_bytecodeOffset != -1);
            return result;
        }
        ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
        {
            ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
            ASSERT(result->m_bytecodeOffset != -1);
            ASSERT(instructions()[bytecodeOffset + opcodeLength(
                       m_globalData->interpreter->getOpcodeID(
                           instructions()[
                               bytecodeOffset].u.opcode)) - 1].u.profile == result);
            return result;
        }
        PredictedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
        {
            return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
        }

        unsigned totalNumberOfValueProfiles()
        {
            return numberOfArgumentValueProfiles() + numberOfValueProfiles();
        }
        ValueProfile* getFromAllValueProfiles(unsigned index)
        {
            if (index < numberOfArgumentValueProfiles())
                return valueProfileForArgument(index);
            return valueProfile(index - numberOfArgumentValueProfiles());
        }

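        // Rare-case ("slow path") and special-fast-case counters feed the heuristics below:
        // a site only counts as hot if its counter clears both an absolute minimum and a
        // fraction of this block's total executions (the Options thresholds used here).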
        RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
        {
            m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
            return &m_rareCaseProfiles.last();
        }
        unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
        RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
        RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
        {
            return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
        }

        bool likelyToTakeSlowCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
        }

        bool couldTakeSlowCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
        }

        RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
        {
            m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
            return &m_specialFastCaseProfiles.last();
        }
        unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
        RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
        RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
        {
            return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
        }

        bool likelyToTakeSpecialFastCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
        }

        bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            unsigned value = slowCaseCount - specialFastCaseCount;
            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
        }

        bool likelyToTakeAnySlowCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            unsigned value = slowCaseCount + specialFastCaseCount;
            return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
        }

        unsigned executionEntryCount() const { return m_executionEntryCount; }
#endif

        unsigned globalResolveInfoCount() const
        {
#if ENABLE(JIT)
            if (m_globalData->canUseJIT())
                return m_globalResolveInfos.size();
#endif
            return 0;
        }

        // Exception handling support

        size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
        void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
        HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

        void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
        {
            createRareDataIfNecessary();
            m_rareData->m_expressionInfo.append(expressionInfo);
        }

        void addLineInfo(unsigned bytecodeOffset, int lineNo)
        {
            createRareDataIfNecessary();
            Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
            if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
                LineInfo info = { bytecodeOffset, lineNo };
                lineInfo.append(info);
            }
        }

        bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
        bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
        // We only generate exception handling info if the user is debugging
        // (and may want line number info), or if the function contains an exception handler.
        bool needsCallReturnIndices()
        {
            return m_rareData &&
                (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
        }

#if ENABLE(JIT)
        Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
        {
            createRareDataIfNecessary();
            return m_rareData->m_callReturnIndexVector;
        }
#endif

#if ENABLE(DFG_JIT)
        SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
        {
            createRareDataIfNecessary();
            return m_rareData->m_inlineCallFrames;
        }

        Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
        {
            createRareDataIfNecessary();
            return m_rareData->m_codeOrigins;
        }

        // Having code origins implies that there has been some inlining.
        bool hasCodeOrigins()
        {
            return m_rareData && !!m_rareData->m_codeOrigins.size();
        }

        bool codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
        {
            if (!hasCodeOrigins())
                return false;
            unsigned offset = getJITCode().offsetOf(returnAddress.value());
            CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
            if (entry->callReturnOffset != offset)
                return false;
            codeOrigin = entry->codeOrigin;
            return true;
        }

        CodeOrigin codeOrigin(unsigned index)
        {
            ASSERT(m_rareData);
            return m_rareData->m_codeOrigins[index].codeOrigin;
        }

        bool addFrequentExitSite(const DFG::FrequentExitSite& site)
        {
            ASSERT(JITCode::isBaselineCode(getJITType()));
            return m_exitProfile.add(site);
        }

        DFG::ExitProfile& exitProfile() { return m_exitProfile; }

        CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
        {
            return m_lazyOperandValueProfiles;
        }
#endif

        // Constant Pool

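        // Constants live in virtual registers numbered from FirstConstantRegisterIndex
        // upward, so the accessors below subtract that base when indexing m_constantRegisters.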
        size_t numberOfIdentifiers() const { return m_identifiers.size(); }
        void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
        Identifier& identifier(int index) { return m_identifiers[index]; }

        size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
        unsigned addConstant(JSValue v)
        {
            unsigned result = m_constantRegisters.size();
            m_constantRegisters.append(WriteBarrier<Unknown>());
            m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
            return result;
        }
        unsigned addOrFindConstant(JSValue);
        WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
        ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
        ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

        unsigned addFunctionDecl(FunctionExecutable* n)
        {
            unsigned size = m_functionDecls.size();
            m_functionDecls.append(WriteBarrier<FunctionExecutable>());
            m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
            return size;
        }
        FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
        int numberOfFunctionDecls() { return m_functionDecls.size(); }
        unsigned addFunctionExpr(FunctionExecutable* n)
        {
            unsigned size = m_functionExprs.size();
            m_functionExprs.append(WriteBarrier<FunctionExecutable>());
            m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
            return size;
        }
        FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

        unsigned addRegExp(RegExp* r)
        {
            createRareDataIfNecessary();
            unsigned size = m_rareData->m_regexps.size();
            m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
            return size;
        }
        unsigned numberOfRegExps() const
        {
            if (!m_rareData)
                return 0;
            return m_rareData->m_regexps.size();
        }
        RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }

        unsigned addConstantBuffer(unsigned length)
        {
            createRareDataIfNecessary();
            unsigned size = m_rareData->m_constantBuffers.size();
            m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
            return size;
        }

        JSValue* constantBuffer(unsigned index)
        {
            ASSERT(m_rareData);
            return m_rareData->m_constantBuffers[index].data();
        }

        JSGlobalObject* globalObject() { return m_globalObject.get(); }

        JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
        {
            if (!codeOrigin.inlineCallFrame)
                return globalObject();
            // FIXME: if we ever inline based on executable not function, this code will need to change.
            return codeOrigin.inlineCallFrame->callee->scope()->globalObject.get();
        }

        // Jump Tables

        size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
        SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
        SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }

        size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
        SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
        SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }

        size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
        StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
        StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }


        SymbolTable* symbolTable() { return m_symbolTable; }
        SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }

        EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

        void shrinkToFit();

        void copyPostParseDataFrom(CodeBlock* alternative);
        void copyPostParseDataFromAlternative();

        // Functions for controlling when JITting kicks in, in a mixed mode
        // execution world.

        bool checkIfJITThresholdReached()
        {
            return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
        }

        void dontJITAnytimeSoon()
        {
            m_llintExecuteCounter.deferIndefinitely();
        }

        void jitAfterWarmUp()
        {
            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp, this);
        }

        void jitSoon()
        {
            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon, this);
        }

        int32_t llintExecuteCounter() const
        {
            return m_llintExecuteCounter.m_counter;
        }

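        // A rough sketch of the intended flow (the actual call sites live in the LLInt slow
        // paths and the Executable classes, not in this header): the interpreter bumps
        // m_llintExecuteCounter on function entry and loop back-edges, and once
        // checkIfJITThresholdReached() returns true it requests baseline compilation of this
        // block via jitCompile().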
        // Functions for controlling when tiered compilation kicks in. This
        // controls both when the optimizing compiler is invoked and when OSR
        // entry happens. Two triggers exist: the loop trigger and the return
        // trigger. In either case, when an addition to m_jitExecuteCounter
        // causes it to become non-negative, the optimizing compiler is
        // invoked. This includes a fast check to see if this CodeBlock has
        // already been optimized (i.e. replacement() returns a CodeBlock
        // that was optimized with a higher tier JIT than this one). In the
        // case of the loop trigger, if the optimized compilation succeeds
        // (or has already succeeded in the past) then OSR is attempted to
        // redirect program flow into the optimized code.

        // These functions are called from within the optimization triggers,
        // and are used as a single point at which we define the heuristics
        // for how much warm-up is mandated before the next optimization
        // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
        // as this is called from the CodeBlock constructor.

        // When we observe a lot of speculation failures, we trigger a
        // reoptimization. But each time, we increase the optimization trigger
        // to avoid thrashing.
        unsigned reoptimizationRetryCounter() const
        {
            ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
            return m_reoptimizationRetryCounter;
        }

        void countReoptimization()
        {
            m_reoptimizationRetryCounter++;
            if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
                m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
        }

        int32_t counterValueForOptimizeAfterWarmUp()
        {
            return Options::thresholdForOptimizeAfterWarmUp << reoptimizationRetryCounter();
        }

        int32_t counterValueForOptimizeAfterLongWarmUp()
        {
            return Options::thresholdForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
        }

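        // Worked example with made-up numbers (the real values come from Options, set
        // elsewhere): if thresholdForOptimizeAfterWarmUp were 1000 and this block had
        // already been reoptimized twice, the next trigger would not fire until
        // 1000 << 2 = 4000 further executions have been counted.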
        int32_t* addressOfJITExecuteCounter()
        {
            return &m_jitExecuteCounter.m_counter;
        }

        static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
        static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
        static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

        int32_t jitExecuteCounter() const { return m_jitExecuteCounter.m_counter; }

        unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

        // Check if the optimization threshold has been reached, and if not,
        // adjust the heuristics accordingly. Returns true if the threshold has
        // been reached.
        bool checkIfOptimizationThresholdReached()
        {
            return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
        }

        // Call this to force the next optimization trigger to fire. This is
        // rarely wise, since optimization triggers are typically more
        // expensive than executing baseline code.
        void optimizeNextInvocation()
        {
            m_jitExecuteCounter.setNewThreshold(0, this);
        }

        // Call this to prevent optimization from happening again. Note that
        // optimization will still happen after roughly 2^29 invocations,
        // so this is really meant to delay that as much as possible. This
        // is called if optimization failed, and we expect it to fail in
        // the future as well.
        void dontOptimizeAnytimeSoon()
        {
            m_jitExecuteCounter.deferIndefinitely();
        }

        // Call this to reinitialize the counter to its starting state,
        // forcing a warm-up to happen before the next optimization trigger
        // fires. This is called in the CodeBlock constructor. It also
        // makes sense to call this if an OSR exit occurred. Note that
        // OSR exit code is code generated, so the value of the execute
        // counter that this corresponds to is also available directly.
        void optimizeAfterWarmUp()
        {
            m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
        }

        // Call this to force an optimization trigger to fire only after
        // a lot of warm-up.
        void optimizeAfterLongWarmUp()
        {
            m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
        }

        // Call this to cause an optimization trigger to fire soon, but
        // not necessarily the next one. This makes sense if optimization
        // succeeds. Successful optimization means that all calls are
        // relinked to the optimized code, so this only affects call
        // frames that are still executing this CodeBlock. The value here
        // is tuned to strike a balance between the cost of OSR entry
        // (which is too high to warrant making every loop back edge
        // trigger OSR immediately) and the cost of executing baseline
        // code (which is high enough that we don't necessarily want to
        // have a full warm-up). The intuition for calling this instead of
        // optimizeNextInvocation() is for the case of recursive functions
        // with loops. Consider that there may be N call frames of some
        // recursive function, for a reasonably large value of N. The top
        // one triggers optimization, and then returns, and then all of
        // the others return. We don't want optimization to be triggered on
        // each return, as that would be superfluous. It only makes sense
        // to trigger optimization if one of those functions becomes hot
        // in the baseline code.
        void optimizeSoon()
        {
            m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this);
        }

        // The speculative JIT tracks its success rate, so that we can
        // decide when to reoptimize. It's interesting to note that these
        // counters may overflow without any protection. The success
        // counter will overflow before the fail one does, because the
        // fail one is used as a trigger to reoptimize. So the worst case
        // is that the success counter overflows and we reoptimize without
        // needing to. But this is harmless. If a method really did
        // execute 2^32 times then compiling it again probably won't hurt
        // anyone.

        void countSpeculationSuccess()
        {
            m_speculativeSuccessCounter++;
        }

        void countSpeculationFailure()
        {
            m_speculativeFailCounter++;
        }

        uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
        uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
        uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; }

        uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
        uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
        uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; }

        static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
        static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
        static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); }

#if ENABLE(JIT)
        // The number of failures that triggers the use of the ratio.
        unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
        unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }

        bool shouldReoptimizeNow()
        {
            return (Options::desiredSpeculativeSuccessFailRatio *
                        speculativeFailCounter() >= speculativeSuccessCounter()
                    && speculativeFailCounter() >= largeFailCountThreshold())
                || forcedOSRExitCounter() >=
                       Options::forcedOSRExitCountForReoptimization;
        }

        bool shouldReoptimizeFromLoopNow()
        {
            return (Options::desiredSpeculativeSuccessFailRatio *
                        speculativeFailCounter() >= speculativeSuccessCounter()
                    && speculativeFailCounter() >= largeFailCountThresholdForLoop())
                || forcedOSRExitCounter() >=
                       Options::forcedOSRExitCountForReoptimization;
        }
#endif

#if ENABLE(VALUE_PROFILER)
        bool shouldOptimizeNow();
#else
        bool shouldOptimizeNow() { return false; }
#endif

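        // Reoptimization jettisons the optimized replacement, records the retry (which
        // left-shifts future thresholds), and forces this baseline block to warm up again
        // before the next optimization attempt.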
#if ENABLE(JIT)
        void reoptimize()
        {
            ASSERT(replacement() != this);
            ASSERT(replacement()->alternative() == this);
            replacement()->tallyFrequentExitSites();
            replacement()->jettison();
            countReoptimization();
            optimizeAfterWarmUp();
        }
#endif

#if ENABLE(VERBOSE_VALUE_PROFILE)
        void dumpValueProfiles();
#endif

        // FIXME: Make these remaining members private.

        int m_numCalleeRegisters;
        int m_numVars;
        int m_numCapturedVars;
        bool m_isConstructor;

    protected:
#if ENABLE(JIT)
        virtual bool jitCompileImpl(JSGlobalData&) = 0;
#endif
        virtual void visitWeakReferences(SlotVisitor&);
        virtual void finalizeUnconditionally();

    private:
        friend class DFGCodeBlocks;

#if ENABLE(DFG_JIT)
        void tallyFrequentExitSites();
#else
        void tallyFrequentExitSites() { }
#endif

        void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;

        CString registerName(ExecState*, int r) const;
        void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
        void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
        void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op) const;
        void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
        void printCallOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
        void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
        void visitStructures(SlotVisitor&, Instruction* vPC) const;

#if ENABLE(DFG_JIT)
        bool shouldImmediatelyAssumeLivenessDuringScan()
        {
            // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
            // CodeBlocks don't need to be jettisoned when their weak references go
            // stale. So if a baseline JIT CodeBlock gets scanned, we can assume that
            // this means that it's live.
            if (!m_dfgData)
                return true;

            // For simplicity, we don't attempt to jettison code blocks during GC if
            // they are executing. Instead we strongly mark their weak references to
            // allow them to continue to execute soundly.
            if (m_dfgData->mayBeExecuting)
                return true;

            return false;
        }
#else
        bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
#endif

        void performTracingFixpointIteration(SlotVisitor&);

        void stronglyVisitStrongReferences(SlotVisitor&);
        void stronglyVisitWeakReferences(SlotVisitor&);

        void createRareDataIfNecessary()
        {
            if (!m_rareData)
                m_rareData = adoptPtr(new RareData);
        }

        int m_numParameters;

        WriteBarrier<ScriptExecutable> m_ownerExecutable;
        JSGlobalData* m_globalData;

        RefCountedArray<Instruction> m_instructions;

        int m_thisRegister;
        int m_argumentsRegister;
        int m_activationRegister;

        bool m_needsFullScopeChain;
        bool m_usesEval;
        bool m_isNumericCompareFunction;
        bool m_isStrictMode;

        CodeType m_codeType;

        RefPtr<SourceProvider> m_source;
        unsigned m_sourceOffset;

        Vector<unsigned> m_propertyAccessInstructions;
        Vector<unsigned> m_globalResolveInstructions;
#if ENABLE(LLINT)
        SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
        SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
#endif
#if ENABLE(JIT)
        Vector<StructureStubInfo> m_structureStubInfos;
        Vector<GlobalResolveInfo> m_globalResolveInfos;
        Vector<CallLinkInfo> m_callLinkInfos;
        Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
        JITCode m_jitCode;
        MacroAssemblerCodePtr m_jitCodeWithArityCheck;
        SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
#endif
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
        OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#endif
#if ENABLE(DFG_JIT)
        struct WeakReferenceTransition {
            WeakReferenceTransition() { }

            WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
                : m_from(globalData, owner, from)
                , m_to(globalData, owner, to)
            {
                if (!!codeOrigin)
                    m_codeOrigin.set(globalData, owner, codeOrigin);
            }

            WriteBarrier<JSCell> m_codeOrigin;
            WriteBarrier<JSCell> m_from;
            WriteBarrier<JSCell> m_to;
        };

        struct DFGData {
            DFGData()
                : mayBeExecuting(false)
                , isJettisoned(false)
            {
            }

            Vector<DFG::OSREntryData> osrEntry;
            SegmentedVector<DFG::OSRExit, 8> osrExit;
            Vector<DFG::SpeculationRecovery> speculationRecovery;
            Vector<WeakReferenceTransition> transitions;
            Vector<WriteBarrier<JSCell> > weakReferences;
            bool mayBeExecuting;
            bool isJettisoned;
            bool livenessHasBeenProved; // Initialized and used on every GC.
            bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
            unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
        };

        OwnPtr<DFGData> m_dfgData;

        // This is relevant to non-DFG code blocks that serve as the profiled code block
        // for DFG code blocks.
        DFG::ExitProfile m_exitProfile;
        CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
#if ENABLE(VALUE_PROFILER)
        Vector<ValueProfile> m_argumentValueProfiles;
        SegmentedVector<ValueProfile, 8> m_valueProfiles;
        SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
        SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
        unsigned m_executionEntryCount;
#endif

1221 | Vector<unsigned> m_jumpTargets; | |
6fe7ccc8 | 1222 | Vector<unsigned> m_loopTargets; |
9dae56ea A |
1223 | |
1224 | // Constant Pool | |
1225 | Vector<Identifier> m_identifiers; | |
14957cd0 A |
1226 | COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown); |
1227 | Vector<WriteBarrier<Unknown> > m_constantRegisters; | |
1228 | Vector<WriteBarrier<FunctionExecutable> > m_functionDecls; | |
1229 | Vector<WriteBarrier<FunctionExecutable> > m_functionExprs; | |
9dae56ea | 1230 | |
f9bf01c6 | 1231 | SymbolTable* m_symbolTable; |
9dae56ea | 1232 | |
6fe7ccc8 A |
1233 | OwnPtr<CodeBlock> m_alternative; |
1234 | ||
1235 | ExecutionCounter m_llintExecuteCounter; | |
1236 | ||
1237 | ExecutionCounter m_jitExecuteCounter; | |
1238 | int32_t m_totalJITExecutions; | |
1239 | uint32_t m_speculativeSuccessCounter; | |
1240 | uint32_t m_speculativeFailCounter; | |
1241 | uint32_t m_forcedOSRExitCounter; | |
1242 | uint16_t m_optimizationDelayCounter; | |
1243 | uint16_t m_reoptimizationRetryCounter; | |
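// Roughly how these counters are meant to interact (the precise thresholds live in Options and
// in the tier-up heuristics, not here): the LLInt and JIT execute counters decide when a block is
// hot enough to move up a tier, the speculative success/failure and forced-OSR-exit counters feed
// the decision to jettison optimized code, and the optimization-delay and reoptimization-retry
// counters back off before trying again.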
1244 | ||
14957cd0 A |
1245 | struct RareData { |
1246 | WTF_MAKE_FAST_ALLOCATED; | |
1247 | public: | |
9dae56ea A |
1248 | Vector<HandlerInfo> m_exceptionHandlers; |
1249 | ||
1250 | // Rare Constants | |
14957cd0 | 1251 | Vector<WriteBarrier<RegExp> > m_regexps; |
9dae56ea | 1252 | |
14957cd0 A |
1253 | // Buffers used for large array literals |
1254 | Vector<Vector<JSValue> > m_constantBuffers; | |
1255 | ||
9dae56ea A |
1256 | // Jump Tables |
1257 | Vector<SimpleJumpTable> m_immediateSwitchJumpTables; | |
1258 | Vector<SimpleJumpTable> m_characterSwitchJumpTables; | |
1259 | Vector<StringJumpTable> m_stringSwitchJumpTables; | |
1260 | ||
1261 | EvalCodeCache m_evalCodeCache; | |
1262 | ||
14957cd0 A |
1263 | // Expression info - present if debugging. |
1264 | Vector<ExpressionRangeInfo> m_expressionInfo; | |
1265 | // Line info - present if profiling or debugging. | |
1266 | Vector<LineInfo> m_lineInfo; | |
9dae56ea | 1267 | #if ENABLE(JIT) |
14957cd0 | 1268 | Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector; |
6fe7ccc8 A |
1269 | #endif |
1270 | #if ENABLE(DFG_JIT) | |
1271 | SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames; | |
1272 | Vector<CodeOriginAtCallReturnOffset> m_codeOrigins; | |
9dae56ea A |
1273 | #endif |
1274 | }; | |
14957cd0 A |
1275 | #if COMPILER(MSVC) |
1276 | friend void WTF::deleteOwnedPtr<RareData>(RareData*); | |
1277 | #endif | |
9dae56ea | 1278 | OwnPtr<RareData> m_rareData; |
6fe7ccc8 A |
1279 | #if ENABLE(JIT) |
1280 | CompileWithDFGState m_canCompileWithDFGState; | |
1281 | #endif | |
9dae56ea A |
1282 | }; |
1283 | ||
1284 | // Program code is not marked by any function, so we make the global object | |
1285 | // responsible for marking it. | |
1286 | ||
f9bf01c6 | 1287 | class GlobalCodeBlock : public CodeBlock { |
6fe7ccc8 A |
1288 | protected: |
1289 | GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other) | |
1290 | : CodeBlock(CopyParsedBlock, other, &m_unsharedSymbolTable) | |
1291 | , m_unsharedSymbolTable(other.m_unsharedSymbolTable) | |
1292 | { | |
1293 | } | |
1294 | ||
1295 | GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative) | |
1296 | : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative) | |
9dae56ea | 1297 | { |
9dae56ea A |
1298 | } |
1299 | ||
9dae56ea | 1300 | private: |
f9bf01c6 A |
1301 | SymbolTable m_unsharedSymbolTable; |
1302 | }; | |
1303 | ||
1304 | class ProgramCodeBlock : public GlobalCodeBlock { | |
1305 | public: | |
6fe7ccc8 A |
1306 | ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other) |
1307 | : GlobalCodeBlock(CopyParsedBlock, other) | |
f9bf01c6 A |
1308 | { |
1309 | } | |
6fe7ccc8 A |
1310 | |
1311 | ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative) | |
1312 | : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative) | |
1313 | { | |
1314 | } | |
1315 | ||
1316 | #if ENABLE(JIT) | |
1317 | protected: | |
1318 | virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*); | |
1319 | virtual void jettison(); | |
1320 | virtual bool jitCompileImpl(JSGlobalData&); | |
1321 | virtual CodeBlock* replacement(); | |
1322 | virtual bool canCompileWithDFGInternal(); | |
1323 | #endif | |
9dae56ea A |
1324 | }; |
1325 | ||
f9bf01c6 | 1326 | class EvalCodeBlock : public GlobalCodeBlock { |
9dae56ea | 1327 | public: |
6fe7ccc8 A |
1328 | EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other) |
1329 | : GlobalCodeBlock(CopyParsedBlock, other) | |
1330 | , m_baseScopeDepth(other.m_baseScopeDepth) | |
1331 | , m_variables(other.m_variables) | |
1332 | { | |
1333 | } | |
1334 | ||
1335 | EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative) | |
1336 | : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative) | |
9dae56ea A |
1337 | , m_baseScopeDepth(baseScopeDepth) |
1338 | { | |
1339 | } | |
1340 | ||
1341 | int baseScopeDepth() const { return m_baseScopeDepth; } | |
1342 | ||
f9bf01c6 A |
1343 | const Identifier& variable(unsigned index) { return m_variables[index]; } |
1344 | unsigned numVariables() { return m_variables.size(); } | |
1345 | void adoptVariables(Vector<Identifier>& variables) | |
1346 | { | |
1347 | ASSERT(m_variables.isEmpty()); | |
1348 | m_variables.swap(variables); | |
1349 | } | |
6fe7ccc8 A |
1350 | |
1351 | #if ENABLE(JIT) | |
1352 | protected: | |
1353 | virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*); | |
1354 | virtual void jettison(); | |
1355 | virtual bool jitCompileImpl(JSGlobalData&); | |
1356 | virtual CodeBlock* replacement(); | |
1357 | virtual bool canCompileWithDFGInternal(); | |
1358 | #endif | |
f9bf01c6 | 1359 | |
9dae56ea A |
1360 | private: |
1361 | int m_baseScopeDepth; | |
f9bf01c6 A |
1362 | Vector<Identifier> m_variables; |
1363 | }; | |
1364 | ||
1365 | class FunctionCodeBlock : public CodeBlock { | |
1366 | public: | |
6fe7ccc8 A |
1367 | FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other) |
1368 | : CodeBlock(CopyParsedBlock, other, other.sharedSymbolTable()) | |
1369 | { | |
1370 | // The fact that we have to do this is yucky, but is necessary because of the | |
1371 | // class hierarchy issues described in the comment block for the main | |
1372 | // constructor, below. | |
1373 | sharedSymbolTable()->ref(); | |
1374 | } | |
1375 | ||
f9bf01c6 A |
1376 | // Rather than using the usual RefCounted::create idiom for SharedSymbolTable, we create the table
1377 | // up front and leak a single reference into the base-class constructor: the CodeBlock has to be
1378 | // initialised before any RefPtr member of this class could be, so we pass a raw pointer that already
1379 | // owns one ref and balance it with a manual deref in the destructor.
6fe7ccc8 A |
1380 | FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr) |
1381 | : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative) | |
f9bf01c6 A |
1382 | { |
1383 | } | |
1384 | ~FunctionCodeBlock() | |
1385 | { | |
1386 | sharedSymbolTable()->deref(); | |
1387 | } | |
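// A sketch of the resulting ownership balance, assuming SharedSymbolTable behaves like an
// ordinary RefCounted type (see the comment above the main constructor):
//
//     SharedSymbolTable* table = SharedSymbolTable::create().leakRef(); // refcount == 1, no RefPtr holds it
//     // ... the CodeBlock stores the raw pointer for its lifetime ...
//     table->deref();                                                   // in ~FunctionCodeBlock, drops back to 0
//
// Copies made through the CopyParsedBlock constructor share the same table and take an extra
// ref() (above), so each FunctionCodeBlock still derefs exactly once.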
6fe7ccc8 A |
1388 | |
1389 | #if ENABLE(JIT) | |
1390 | protected: | |
1391 | virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*); | |
1392 | virtual void jettison(); | |
1393 | virtual bool jitCompileImpl(JSGlobalData&); | |
1394 | virtual CodeBlock* replacement(); | |
1395 | virtual bool canCompileWithDFGInternal(); | |
1396 | #endif | |
9dae56ea A |
1397 | }; |
1398 | ||
6fe7ccc8 A |
1399 | inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame) |
1400 | { | |
1401 | ASSERT(inlineCallFrame); | |
1402 | ExecutableBase* executable = inlineCallFrame->executable.get(); | |
1403 | ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info); | |
1404 | return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct); | |
1405 | } | |
1406 | ||
1407 | inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock) | |
1408 | { | |
1409 | if (codeOrigin.inlineCallFrame) | |
1410 | return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame); | |
1411 | return baselineCodeBlock; | |
1412 | } | |
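// A hypothetical use of these helpers (the surrounding names are illustrative only): when DFG
// machinery needs to reason about an inlined frame in terms of baseline bytecode, it first maps
// the frame's CodeOrigin back to the right baseline block:
//
//     CodeBlock* baseline = baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, profiledBlock);
//     unsigned bytecodeIndex = codeOrigin.bytecodeIndex; // now meaningful relative to 'baseline'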
1413 | ||
1414 | ||
ba379fdc A |
1415 | inline Register& ExecState::r(int index) |
1416 | { | |
1417 | CodeBlock* codeBlock = this->codeBlock(); | |
1418 | if (codeBlock->isConstantRegisterIndex(index)) | |
14957cd0 | 1419 | return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index)); |
ba379fdc A |
1420 | return this[index]; |
1421 | } | |
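// Operand indices at or above FirstConstantRegisterIndex name entries in the CodeBlock's constant
// pool rather than slots in the register file; the COMPILE_ASSERT above makes the reinterpret_cast
// between Register and WriteBarrier<Unknown> legitimate. A hypothetical interpreter-style call site
// that works for locals and constants alike:
//
//     JSValue value = exec->r(operandIndex).jsValue();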
1422 | ||
14957cd0 A |
1423 | inline Register& ExecState::uncheckedR(int index) |
1424 | { | |
1425 | ASSERT(index < FirstConstantRegisterIndex); | |
1426 | return this[index]; | |
1427 | } | |
6fe7ccc8 A |
1428 | |
1429 | #if ENABLE(DFG_JIT) | |
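// Only DFG-compiled code creates inline call frames, so anything running in the LLInt or the
// baseline JIT (or with no CodeBlock at all) can answer false here without taking the slow path.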
1430 | inline bool ExecState::isInlineCallFrame() | |
1431 | { | |
1432 | if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT)) | |
1433 | return false; | |
1434 | return isInlineCallFrameSlow(); | |
1435 | } | |
1436 | #endif | |
1437 | ||
1438 | #if ENABLE(DFG_JIT) | |
1439 | inline void DFGCodeBlocks::mark(void* candidateCodeBlock) | |
1440 | { | |
1441 | // We have to check for 0 and -1 because those are used by the HashMap as markers. | |
1442 | uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock); | |
1443 | ||
1444 | // This checks for both of those nasty cases in one go: in unsigned arithmetic,
1445 | // 0 + 1 = 1 and (uintptr_t)-1 + 1 wraps around to 0,
1446 | // so both markers satisfy the "value + 1 <= 1" early return below.
1447 | if (value + 1 <= 1) | |
1448 | return; | |
1449 | ||
1450 | HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock)); | |
1451 | if (iter == m_set.end()) | |
1452 | return; | |
1453 | ||
1454 | (*iter)->m_dfgData->mayBeExecuting = true; | |
1455 | } | |
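// Callers are expected to feed this arbitrary pointer-sized words (for example, values found while
// conservatively scanning the stack), which is why bit patterns like 0 and -1 must be tolerated
// rather than asserted against.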
1456 | #endif | |
14957cd0 | 1457 | |
9dae56ea A |
1458 | } // namespace JSC |
1459 | ||
1460 | #endif // CodeBlock_h |