/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
#include "DFGCodeBlocks.h"
#include "DFGCommon.h"
#include "DFGExitProfile.h"
#include "DFGMinifiedGraph.h"
#include "DFGOSREntry.h"
#include "DFGOSRExit.h"
#include "DFGVariableEventStream.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpReplacementWatchpoint.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "ProfilerCompilation.h"
#include "RegExpObject.h"
#include "ResolveOperation.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "Watchpoint.h"
#include <wtf/FastAllocBase.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/Platform.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

class DFGCodeBlocks;
class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;

inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
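
// A note on the layout the helper above encodes (illustrative, inferred from
// unmodifiedArgumentsRegister() itself): the unmodified arguments object is
// expected to live in the register immediately before the user-visible one.
// As a hypothetical example:
//
//     int argumentsRegister = 5; // arguments object in r5
//     int unmodified = unmodifiedArgumentsRegister(argumentsRegister); // copy in r4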

class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class JIT;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other);

    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSGlobalObject*, unsigned baseScopeDepth, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative);

    WriteBarrier<JSGlobalObject> m_globalObject;
    Heap* m_heap;

public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    String inferredName() const;
    CodeBlockHash hash() const;
    String sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    String sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }

    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }

#if ENABLE(JIT)
    CodeBlock* baselineVersion()
    {
        CodeBlock* result = replacement();
        if (!result)
            return 0; // This can happen if we're in the process of creating the baseline version.
        while (result->alternative())
            result = result->alternative();
        ASSERT(result);
        ASSERT(JITCode::isBaselineCode(result->getJITType()));
        return result;
    }
#else
    CodeBlock* baselineVersion()
    {
        return this;
    }
#endif

    void visitAggregate(SlotVisitor&);

    static void dumpStatistics();

    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    bool isStrictMode() const { return m_isStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column);

#if ENABLE(JIT)

    StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<StructureStubInfo, void*>(m_structureStubInfos, m_structureStubInfos.size(), returnAddress.value(), getStructureStubInfoReturnLocation));
    }

    StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<StructureStubInfo, unsigned>(m_structureStubInfos, m_structureStubInfos.size(), bytecodeIndex, getStructureStubInfoBytecodeIndex));
    }

    void resetStub(StructureStubInfo&);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
    }

    CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
    {
        ASSERT(JITCode::isBaselineCode(getJITType()));
        return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
    }
#endif // ENABLE(JIT)
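
    // The getters above use binarySearch, so they assume the info vectors are
    // kept sorted by the search key (return address or bytecode index) and,
    // unlike tryBinarySearch, that a matching entry is present; a miss fails a
    // debug assertion rather than returning null. A typical lookup (a sketch)
    // is simply:
    //
    //     StructureStubInfo& stubInfo = codeBlock->getStubInfo(bytecodeIndex);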

#if ENABLE(LLINT)
    Instruction* adjustPCIfAtCallSite(Instruction*);
#endif
    unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);

#if ENABLE(JIT)
    unsigned bytecodeOffsetForCallAtIndex(unsigned index)
    {
        if (!m_rareData)
            return 1;
        Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
        if (!callIndices.size())
            return 1;
        // FIXME: Fix places in DFG that call out to C that don't set the CodeOrigin. https://bugs.webkit.org/show_bug.cgi?id=118315
        ASSERT(index < m_rareData->m_callReturnIndexVector.size());
        if (index >= m_rareData->m_callReturnIndexVector.size())
            return 1;
        return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
    }

    void unlinkCalls();

    bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }

    void linkIncomingCall(CallLinkInfo* incoming)
    {
        m_incomingCalls.push(incoming);
    }

    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)

#if ENABLE(LLINT)
    void linkIncomingCall(LLIntCallLinkInfo* incoming)
    {
        m_incomingLLIntCalls.push(incoming);
    }
#endif // ENABLE(LLINT)

    void unlinkIncomingCalls();

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }
#endif

#if ENABLE(DFG_JIT)
    void createDFGDataIfNecessary()
    {
        if (!!m_dfgData)
            return;

        m_dfgData = adoptPtr(new DFGData);
    }

    void saveCompilation(PassRefPtr<Profiler::Compilation> compilation)
    {
        createDFGDataIfNecessary();
        m_dfgData->compilation = compilation;
    }

    Profiler::Compilation* compilation()
    {
        if (!m_dfgData)
            return 0;
        return m_dfgData->compilation.get();
    }

    DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
    {
        createDFGDataIfNecessary();
        DFG::OSREntryData entry;
        entry.m_bytecodeIndex = bytecodeIndex;
        entry.m_machineCodeOffset = machineCodeOffset;
        m_dfgData->osrEntry.append(entry);
        return &m_dfgData->osrEntry.last();
    }
    unsigned numberOfDFGOSREntries() const
    {
        if (!m_dfgData)
            return 0;
        return m_dfgData->osrEntry.size();
    }
    DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
    DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
    {
        if (!m_dfgData)
            return 0;
        return tryBinarySearch<DFG::OSREntryData, unsigned>(
            m_dfgData->osrEntry, m_dfgData->osrEntry.size(), bytecodeIndex,
            DFG::getOSREntryDataBytecodeIndex);
    }

    unsigned appendOSRExit(const DFG::OSRExit& osrExit)
    {
        createDFGDataIfNecessary();
        unsigned result = m_dfgData->osrExit.size();
        m_dfgData->osrExit.append(osrExit);
        return result;
    }

    DFG::OSRExit& lastOSRExit()
    {
        return m_dfgData->osrExit.last();
    }

    unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
    {
        createDFGDataIfNecessary();
        unsigned result = m_dfgData->speculationRecovery.size();
        m_dfgData->speculationRecovery.append(recovery);
        return result;
    }

    unsigned appendWatchpoint(const JumpReplacementWatchpoint& watchpoint)
    {
        createDFGDataIfNecessary();
        unsigned result = m_dfgData->watchpoints.size();
        m_dfgData->watchpoints.append(watchpoint);
        return result;
    }

    unsigned numberOfOSRExits()
    {
        if (!m_dfgData)
            return 0;
        return m_dfgData->osrExit.size();
    }

    unsigned numberOfSpeculationRecoveries()
    {
        if (!m_dfgData)
            return 0;
        return m_dfgData->speculationRecovery.size();
    }

    unsigned numberOfWatchpoints()
    {
        if (!m_dfgData)
            return 0;
        return m_dfgData->watchpoints.size();
    }

    DFG::OSRExit& osrExit(unsigned index)
    {
        return m_dfgData->osrExit[index];
    }

    DFG::SpeculationRecovery& speculationRecovery(unsigned index)
    {
        return m_dfgData->speculationRecovery[index];
    }

    JumpReplacementWatchpoint& watchpoint(unsigned index)
    {
        return m_dfgData->watchpoints[index];
    }

    void appendWeakReference(JSCell* target)
    {
        createDFGDataIfNecessary();
        m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*vm(), ownerExecutable(), target));
    }

    void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
    {
        createDFGDataIfNecessary();
        m_dfgData->transitions.append(
            WeakReferenceTransition(*vm(), ownerExecutable(), codeOrigin, from, to));
    }

    DFG::MinifiedGraph& minifiedDFG()
    {
        createDFGDataIfNecessary();
        return m_dfgData->minifiedDFG;
    }

    DFG::VariableEventStream& variableEventStream()
    {
        createDFGDataIfNecessary();
        return m_dfgData->variableEventStream;
    }
#endif

    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }

    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() { return m_instructions.size(); }

    int argumentIndexAfterCapture(size_t argument);

#if ENABLE(JIT)
    void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
    {
        m_jitCode = code;
        m_jitCodeWithArityCheck = codeWithArityCheck;
#if ENABLE(DFG_JIT)
        if (m_jitCode.jitType() == JITCode::DFGJIT) {
            createDFGDataIfNecessary();
            m_vm->heap.m_dfgCodeBlocks.m_set.add(this);
        }
#endif
    }
    JITCode& getJITCode() { return m_jitCode; }
    MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
    JITCode::JITType getJITType() const { return m_jitCode.jitType(); }
    ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex) = 0;
    void jettison();
    enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
    JITCompilationResult jitCompile(ExecState* exec)
    {
        if (getJITType() != JITCode::InterpreterThunk) {
            ASSERT(getJITType() == JITCode::BaselineJIT);
            return AlreadyCompiled;
        }
#if ENABLE(JIT)
        if (jitCompileImpl(exec))
            return CompiledSuccessfully;
        return CouldNotCompile;
#else
        UNUSED_PARAM(exec);
        return CouldNotCompile;
#endif
    }
    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0;
    DFG::CapabilityLevel canCompileWithDFG()
    {
        DFG::CapabilityLevel result = canCompileWithDFGInternal();
        m_canCompileWithDFGState = result;
        return result;
    }
    DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; }

    bool hasOptimizedReplacement()
    {
        ASSERT(JITCode::isBaselineCode(getJITType()));
        bool result = replacement()->getJITType() > getJITType();
#if !ASSERT_DISABLED
        if (result)
            ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
        else {
            ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
            ASSERT(replacement() == this);
        }
#endif
        return result;
    }
#else
    JITCode::JITType getJITType() const { return JITCode::BaselineJIT; }
#endif

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
    int thisRegister() const { return m_thisRegister; }

    bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
    bool usesEval() const { return m_unlinkedCode->usesEval(); }

    void setArgumentsRegister(int argumentsRegister)
    {
        ASSERT(argumentsRegister != -1);
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    int argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    int uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return InvalidVirtualRegister;
        return argumentsRegister();
    }
    void setActivationRegister(int activationRegister)
    {
        m_activationRegister = activationRegister;
    }
    int activationRegister() const
    {
        ASSERT(needsFullScopeChain());
        return m_activationRegister;
    }
    int uncheckedActivationRegister()
    {
        if (!needsFullScopeChain())
            return InvalidVirtualRegister;
        return activationRegister();
    }
    bool usesArguments() const { return m_argumentsRegister != -1; }

    bool needsActivation() const
    {
        return needsFullScopeChain() && codeType() != GlobalCode;
    }

    bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const
    {
        if (operandIsArgument(operand))
            return operandToArgument(operand) && usesArguments();

        if (inlineCallFrame)
            return inlineCallFrame->capturedVars.get(operand);

        // The activation object isn't in the captured region, but it's "captured"
        // in the sense that stores to its location can be observed indirectly.
        if (needsActivation() && operand == activationRegister())
            return true;
        // Ditto for the arguments object.
        if (usesArguments() && operand == argumentsRegister())
            return true;

        // And for the unmodified copy of the arguments object, which lives in
        // the register just before it.
        if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
            return true;

        // We're in global code, so there are no locals to capture.
        if (!symbolTable())
            return false;

        return operand >= symbolTable()->captureStart()
            && operand < symbolTable()->captureEnd();
    }
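
    // Illustrative use of isCaptured() (a sketch; the real call sites live in
    // the compilers, not in this header): a captured operand can be read or
    // written through the activation or the arguments object, so it must stay
    // in its stack slot rather than being kept in a machine register:
    //
    //     if (!codeBlock->isCaptured(operand, inlineCallFrame))
    //         keepInRegister(operand); // hypothetical optimization step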

    CodeType codeType() const { return m_unlinkedCode->codeType(); }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void createActivation(CallFrame*);

    void clearEvalCache();

    String nameForRegister(int registerNumber);

#if ENABLE(JIT)
    void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
    size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
    StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }

    void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }

    void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
    size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
    CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif

#if ENABLE(VALUE_PROFILER)
    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[
                    bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
    }

    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }
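
    // The flat profile index space implied by the accessor above: argument
    // profiles come first, then bytecode value profiles. So iterating every
    // profile looks like (a sketch):
    //
    //     for (unsigned i = 0; i < codeBlock->totalNumberOfValueProfiles(); ++i) {
    //         ValueProfile* profile = codeBlock->getFromAllValueProfiles(i);
    //         profile->computeUpdatedPrediction();
    //     }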

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }

    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!numberOfRareCaseProfiles())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
#endif

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
    {
        size_t count = unlinkedHandlers.size();
        if (!count)
            return;
        createRareDataIfNecessary();
        m_rareData->m_exceptionHandlers.resize(count);
        for (size_t i = 0; i < count; ++i) {
            m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
            m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
            m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
            m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
        }
    }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(JIT)
    Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callReturnIndexVector()
    {
        createRareDataIfNecessary();
        return m_rareData->m_callReturnIndexVector;
    }
#endif

#if ENABLE(DFG_JIT)
    SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
    {
        createRareDataIfNecessary();
        return m_rareData->m_inlineCallFrames;
    }

    Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        createRareDataIfNecessary();
        return m_rareData->m_codeOrigins;
    }

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return m_rareData && !!m_rareData->m_codeOrigins.size();
    }

    bool codeOriginForReturn(ReturnAddressPtr, CodeOrigin&);

    bool canGetCodeOrigin(unsigned index)
    {
        if (!m_rareData)
            return false;
        return m_rareData->m_codeOrigins.size() > index;
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        RELEASE_ASSERT(m_rareData);
        return m_rareData->m_codeOrigins[index].codeOrigin;
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(getJITType()));
        return m_exitProfile.add(site);
    }

    bool hasExitSite(const DFG::FrequentExitSite& site) const { return m_exitProfile.hasExitSite(site); }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif

    // Constant Pool

    size_t numberOfIdentifiers() const { return m_identifiers.size(); }
    void addIdentifier(const Identifier& i) { m_identifiers.append(i); }
    Identifier& identifier(int index) { return m_identifiers[index]; }

    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }

    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }

    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }

    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    // Jump Tables

    size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
    SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
    SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }

    size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
    SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
    SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);
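
    // A sketch of how the two modes are meant to be used (the call sites live
    // in the compilers, not in this header): shrink aggressively before code
    // generation, and only conservatively after it.
    //
    //     codeBlock->shrinkToFit(CodeBlock::EarlyShrink); // before emitting code
    //     // ... generate machine code that may point into the vectors ...
    //     codeBlock->shrinkToFit(CodeBlock::LateShrink);  // afterwards, only what is still safe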

    void copyPostParseDataFrom(CodeBlock* alternative);
    void copyPostParseDataFromAlternative();

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const ExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
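
    // Typical tier-up pattern (a sketch of the intended use; the real call
    // sites are in the LLInt slow paths): once the interpreter's counter
    // crosses its threshold, try to compile with the baseline JIT.
    //
    //     if (codeBlock->checkIfJITThresholdReached())
    //         codeBlock->jitCompile(exec); // AlreadyCompiled, CouldNotCompile or CompiledSuccessfully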

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.
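    //
    // Worked example (illustrative numbers, not the tuned defaults): with an
    // active threshold of 1000, the counter starts at -1000; each return or
    // loop back-edge adds to it, and the addition that brings it to 0 fires
    // the trigger, at which point checkIfOptimizationThresholdReached() below
    // runs as the slow path.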

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    unsigned reoptimizationRetryCounter() const;
    void countReoptimization();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t counterValueForOptimizeAfterWarmUp();
    int32_t counterValueForOptimizeAfterLongWarmUp();
    int32_t counterValueForOptimizeSoon();

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

    const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge to
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

#if ENABLE(JIT)
    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
#endif

#if ENABLE(VALUE_PROFILER)
    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions(OperationInProgress = NoOperation);
    void updateAllArrayPredictions(OperationInProgress = NoOperation);
    void updateAllPredictions(OperationInProgress = NoOperation);
#else
    bool shouldOptimizeNow() { return false; }
    void updateAllValueProfilePredictions(OperationInProgress = NoOperation) { }
    void updateAllArrayPredictions(OperationInProgress = NoOperation) { }
    void updateAllPredictions(OperationInProgress = NoOperation) { }
#endif

#if ENABLE(JIT)
    void reoptimize();
#endif

#if ENABLE(VERBOSE_VALUE_PROFILE)
    void dumpValueProfiles();
#endif

    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    int m_numVars;
    bool m_isConstructor;

protected:
#if ENABLE(JIT)
    virtual bool jitCompileImpl(ExecState*) = 0;
    virtual void jettisonImpl() = 0;
#endif
    virtual void visitWeakReferences(SlotVisitor&);
    virtual void finalizeUnconditionally();

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class DFGCodeBlocks;

    double optimizationThresholdScalingFactor();

#if ENABLE(JIT)
    ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
#endif

#if ENABLE(VALUE_PROFILER)
    void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
#endif

    void setIdentifiers(const Vector<Identifier>& identifiers)
    {
        RELEASE_ASSERT(m_identifiers.isEmpty());
        m_identifiers.appendVector(identifiers);
    }

    void setConstantRegisters(const Vector<WriteBarrier<Unknown> >& constants)
    {
        size_t count = constants.size();
        m_constantRegisters.resize(count);
        for (size_t i = 0; i < count; i++)
            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
    }

    void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&);

    CString registerName(ExecState*, int r) const;
    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location);
    enum CacheDumpMode { DumpCaches, DontDumpCaches };
    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode);
    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
#if ENABLE(VALUE_PROFILER)
    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
#endif

    void visitStructures(SlotVisitor&, Instruction* vPC);

#if ENABLE(DFG_JIT)
    bool shouldImmediatelyAssumeLivenessDuringScan()
    {
        // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
        // CodeBlocks don't need to be jettisoned when their weak references go
        // stale. So if a baseline JIT CodeBlock gets scanned, we can assume that
        // this means that it's live.
        if (!m_dfgData)
            return true;

        // For simplicity, we don't attempt to jettison code blocks during GC if
        // they are executing. Instead we strongly mark their weak references to
        // allow them to continue to execute soundly.
        if (m_dfgData->mayBeExecuting)
            return true;

        if (Options::forceDFGCodeBlockLiveness())
            return true;

        return false;
    }
#else
    bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
#endif

    void performTracingFixpointIteration(SlotVisitor&);

    void stronglyVisitStrongReferences(SlotVisitor&);
    void stronglyVisitWeakReferences(SlotVisitor&);

    void createRareDataIfNecessary()
    {
        if (!m_rareData)
            m_rareData = adoptPtr(new RareData);
    }

#if ENABLE(JIT)
    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
#endif
    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    int m_numParameters;
    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    VM* m_vm;

    RefCountedArray<Instruction> m_instructions;
    int m_thisRegister;
    int m_argumentsRegister;
    int m_activationRegister;

    bool m_isStrictMode;
    bool m_needsActivation;

    RefPtr<SourceProvider> m_source;
    unsigned m_sourceOffset;
    unsigned m_firstLineColumnOffset;
    unsigned m_codeType;

#if ENABLE(LLINT)
    SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
#endif
#if ENABLE(JIT)
    Vector<StructureStubInfo> m_structureStubInfos;
    Vector<ByValInfo> m_byValInfos;
    Vector<CallLinkInfo> m_callLinkInfos;
    JITCode m_jitCode;
    MacroAssemblerCodePtr m_jitCodeWithArityCheck;
    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
#endif
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#endif
#if ENABLE(DFG_JIT)
    struct WeakReferenceTransition {
        WeakReferenceTransition() { }

        WeakReferenceTransition(VM& vm, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
            : m_from(vm, owner, from)
            , m_to(vm, owner, to)
        {
            if (!!codeOrigin)
                m_codeOrigin.set(vm, owner, codeOrigin);
        }

        WriteBarrier<JSCell> m_codeOrigin;
        WriteBarrier<JSCell> m_from;
        WriteBarrier<JSCell> m_to;
    };

    struct DFGData {
        DFGData()
            : mayBeExecuting(false)
            , isJettisoned(false)
        {
        }

        Vector<DFG::OSREntryData> osrEntry;
        SegmentedVector<DFG::OSRExit, 8> osrExit;
        Vector<DFG::SpeculationRecovery> speculationRecovery;
        SegmentedVector<JumpReplacementWatchpoint, 1, 0> watchpoints;
        Vector<WeakReferenceTransition> transitions;
        Vector<WriteBarrier<JSCell> > weakReferences;
        DFG::VariableEventStream variableEventStream;
        DFG::MinifiedGraph minifiedDFG;
        RefPtr<Profiler::Compilation> compilation;
        bool mayBeExecuting;
        bool isJettisoned;
        bool livenessHasBeenProved; // Initialized and used on every GC.
        bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
        unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
    };

    OwnPtr<DFGData> m_dfgData;

    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    DFG::ExitProfile m_exitProfile;
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
#if ENABLE(VALUE_PROFILER)
    Vector<ValueProfile> m_argumentValueProfiles;
    SegmentedVector<ValueProfile, 8> m_valueProfiles;
    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
    SegmentedVector<ArrayAllocationProfile, 8> m_arrayAllocationProfiles;
    ArrayProfileVector m_arrayProfiles;
#endif
    SegmentedVector<ObjectAllocationProfile, 8> m_objectAllocationProfiles;

    // Constant Pool
    Vector<Identifier> m_identifiers;
    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
    // it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown> > m_constantRegisters;
    Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
    Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;

    OwnPtr<CodeBlock> m_alternative;

    ExecutionCounter m_llintExecuteCounter;

    ExecutionCounter m_jitExecuteCounter;
    int32_t m_totalJITExecutions;
    uint32_t m_osrExitCounter;
    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    Vector<ResolveOperations> m_resolveOperations;
    Vector<PutToBaseOperation, 1> m_putToBaseOperations;

    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        // Buffers used for large array literals
        Vector<Vector<JSValue> > m_constantBuffers;

        // Jump Tables
        Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
        Vector<SimpleJumpTable> m_characterSwitchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        EvalCodeCache m_evalCodeCache;

#if ENABLE(JIT)
        Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow> m_callReturnIndexVector;
#endif
#if ENABLE(DFG_JIT)
        SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
        Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow> m_codeOrigins;
#endif
    };
#if COMPILER(MSVC)
    friend void WTF::deleteOwnedPtr<RareData>(RareData*);
#endif
    OwnPtr<RareData> m_rareData;
#if ENABLE(JIT)
    DFG::CapabilityLevel m_canCompileWithDFGState;
#endif
};

// Program code is not marked by any function, so we make the global object
// responsible for marking it.

class GlobalCodeBlock : public CodeBlock {
protected:
    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
    {
    }
};

class ProgramCodeBlock : public GlobalCodeBlock {
public:
    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, 0, firstLineColumnOffset, alternative)
    {
    }

#if ENABLE(JIT)
protected:
    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
    virtual void jettisonImpl();
    virtual bool jitCompileImpl(ExecState*);
    virtual CodeBlock* replacement();
    virtual DFG::CapabilityLevel canCompileWithDFGInternal();
#endif
};

class EvalCodeBlock : public GlobalCodeBlock {
public:
    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, 0, 1, alternative)
    {
    }

    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }

#if ENABLE(JIT)
protected:
    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
    virtual void jettisonImpl();
    virtual bool jitCompileImpl(ExecState*);
    virtual CodeBlock* replacement();
    virtual DFG::CapabilityLevel canCompileWithDFGInternal();
#endif

private:
    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
};

class FunctionCodeBlock : public CodeBlock {
public:
    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset, PassOwnPtr<CodeBlock> alternative = nullptr)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, sourceOffset, firstLineColumnOffset, alternative)
    {
    }

#if ENABLE(JIT)
protected:
    virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
    virtual void jettisonImpl();
    virtual bool jitCompileImpl(ExecState*);
    virtual CodeBlock* replacement();
    virtual DFG::CapabilityLevel canCompileWithDFGInternal();
#endif
};

inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
    RELEASE_ASSERT(inlineCallFrame);
    ExecutableBase* executable = inlineCallFrame->executable.get();
    RELEASE_ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}

inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
{
    if (codeOrigin.inlineCallFrame)
        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    return baselineCodeBlock;
}

inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
{
    if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
        return CallFrame::argumentOffset(argument);

    const SlowArgument* slowArguments = symbolTable()->slowArguments();
    if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
        return CallFrame::argumentOffset(argument);

    ASSERT(slowArguments[argument].status == SlowArgument::Captured);
    return slowArguments[argument].index;
}
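
// A sketch of what argumentIndexAfterCapture() encodes: an uncaptured argument
// keeps its usual call-frame offset, while a captured one has been given a
// dedicated slot by the symbol table, so reads must be redirected there:
//
//     int index = codeBlock->argumentIndexAfterCapture(0);
//     JSValue value = exec->r(index).jsValue(); // wherever argument 0 actually lives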

inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}
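
// Note the asymmetry between r() above and uncheckedR() below, as the
// assertions imply: indices at or above FirstConstantRegisterIndex name
// entries in the CodeBlock's constant pool rather than call-frame slots, so
// r() redirects them while uncheckedR() requires a genuine register index.
// For example (illustrative):
//
//     exec->r(FirstConstantRegisterIndex + 2); // third entry in the constant pool
//     exec->uncheckedR(2);                     // the register at frame offset 2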

inline Register& ExecState::uncheckedR(int index)
{
    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}

#if ENABLE(DFG_JIT)
inline bool ExecState::isInlineCallFrame()
{
    if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
        return false;
    return isInlineCallFrameSlow();
}
#endif

inline JSValue ExecState::argumentAfterCapture(size_t argument)
{
    if (argument >= argumentCount())
        return jsUndefined();

    if (!codeBlock())
        return this[argumentOffset(argument)].jsValue();

    return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
}

#if ENABLE(DFG_JIT)
inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
{
    // We have to check for 0 and -1 because those are used by the HashMap as markers.
    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);

    // This checks for both of those nasty cases in one go.
    // 0 + 1 = 1
    // -1 + 1 = 0
    if (value + 1 <= 1)
        return;

    HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
    if (iter == m_set.end())
        return;

    (*iter)->m_dfgData->mayBeExecuting = true;
}
#endif

} // namespace JSC

#endif // CodeBlock_h