/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "BytecodeLivenessAnalysis.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
#include "ConcurrentJITLock.h"
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "ProfilerCompilation.h"
#include "ProfilerJettisonReason.h"
#include "PutPropertySlot.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
#include <wtf/FastMalloc.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;

inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };

class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class BytecodeLivenessAnalysis;
    friend class JIT;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other);

    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);

    WriteBarrier<JSGlobalObject> m_globalObject;
    Heap* m_heap;

public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString hashAsStringIfPossible() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }

    template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor)
    {
        Functor f(std::forward<Functor>(functor));
        Vector<CodeBlock*, 4> codeBlocks;
        codeBlocks.append(this);

        while (!codeBlocks.isEmpty()) {
            CodeBlock* currentCodeBlock = codeBlocks.takeLast();
            f(currentCodeBlock);

            if (CodeBlock* alternative = currentCodeBlock->alternative())
                codeBlocks.append(alternative);
            if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull())
                codeBlocks.append(osrEntryBlock);
        }
    }

    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }

    CodeBlock* baselineAlternative();

    // FIXME: Get rid of this.
    // https://bugs.webkit.org/show_bug.cgi?id=123677
    CodeBlock* baselineVersion();

    void visitAggregate(SlotVisitor&);

    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(
        PrintStream&, unsigned bytecodeOffset,
        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    bool isStrictMode() const { return m_isStrictMode; }
    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister.offset() && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
        int& startOffset, int& endOffset, unsigned& line, unsigned& column);

    void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);
    void getStubInfoMap(StubInfoMap& result);

    void getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result);
    void getCallLinkInfoMap(CallLinkInfoMap& result);

#if ENABLE(JIT)
    StructureStubInfo* addStubInfo();
    Bag<StructureStubInfo>::iterator stubInfoBegin() { return m_stubInfos.begin(); }
    Bag<StructureStubInfo>::iterator stubInfoEnd() { return m_stubInfos.end(); }

    void resetStub(StructureStubInfo&);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo* addCallLinkInfo();
    Bag<CallLinkInfo>::iterator callLinkInfosBegin() { return m_callLinkInfos.begin(); }
    Bag<CallLinkInfo>::iterator callLinkInfosEnd() { return m_callLinkInfos.end(); }

    // This is a slow function call, used primarily for compiling OSR exits when
    // there has been inlining. Chances are, if you want to use this, you're really
    // looking for a CallLinkInfoMap to amortize the cost of calling it.
    CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
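
    // Illustrative sketch (not part of this header): a pass that needs many such
    // lookups should build the map once via getCallLinkInfoMap() and query that
    // instead of calling getCallLinkInfoForBytecodeIndex() repeatedly:
    //
    //     CallLinkInfoMap map;
    //     codeBlock->getCallLinkInfoMap(map);
    //     CallLinkInfo* info = map.get(CodeOrigin(bytecodeIndex)); // hypothetical key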
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void unlinkCalls();

    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);

    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)

    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);

    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }

    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }

    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() const { return m_instructions.size(); }

    int argumentIndexAfterCapture(size_t argument);

    bool hasSlowArguments();
    const SlowArgument* machineSlowArguments();

    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
    void install();

    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    PassRefPtr<CodeBlock> newReplacement();

    void setJITCode(PassRefPtr<JITCode> code)
    {
        ASSERT(m_heap->isDeferred());
        m_heap->reportExtraMemoryCost(code->size());
        ConcurrentJITLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = code;
    }
    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }

#if ENABLE(JIT)
    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
    DFG::CapabilityLevel capabilityLevel();
    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }

    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif

    void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization);

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
    VirtualRegister thisRegister() const { return m_thisRegister; }

    bool usesEval() const { return m_unlinkedCode->usesEval(); }

    void setArgumentsRegister(VirtualRegister argumentsRegister)
    {
        ASSERT(argumentsRegister.isValid());
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    VirtualRegister argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    VirtualRegister uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return VirtualRegister();
        return argumentsRegister();
    }
    void setActivationRegister(VirtualRegister activationRegister)
    {
        m_activationRegister = activationRegister;
    }

    VirtualRegister activationRegister() const
    {
        ASSERT(m_activationRegister.isValid());
        return m_activationRegister;
    }

    VirtualRegister uncheckedActivationRegister()
    {
        return m_activationRegister;
    }

    bool usesArguments() const { return m_argumentsRegister.isValid(); }

    bool needsActivation() const
    {
        ASSERT(m_activationRegister.isValid() == m_needsActivation);
        return m_needsActivation;
    }

    unsigned captureCount() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureCount();
    }

    int captureStart() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureStart();
    }

    int captureEnd() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureEnd();
    }

    bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;

    int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
    int framePointerOffsetToGetActivationRegisters();

    CodeType codeType() const { return m_unlinkedCode->codeType(); }
    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void clearEvalCache();

    String nameForRegister(VirtualRegister);

#if ENABLE(JIT)
    void setNumberOfByValInfos(size_t size) { m_byValInfos.resizeToFit(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
#endif

    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
    }

    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }
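
    // Illustrative sketch (not part of this header): argument profiles and
    // bytecode-offset profiles form one logical sequence, which is how a
    // prediction pass might walk all of them:
    //
    //     for (unsigned i = 0; i < codeBlock->totalNumberOfValueProfiles(); ++i) {
    //         ValueProfile* profile = codeBlock->getFromAllValueProfiles(i);
    //         // ... update predictions from profile ...
    //     }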

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }

    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }
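
    // Illustrative sketch (not part of this header): an optimizing compiler
    // might consult these heuristics when deciding how defensively to compile
    // an operation at some bytecode offset:
    //
    //     if (codeBlock->likelyToTakeSlowCase(bytecodeOffset))
    //         emitGenericPath();     // hypothetical helper
    //     else
    //         emitSpeculativePath(); // hypothetical helper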

    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        return m_jitCode->dfgCommon()->codeOrigins;
    }

    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }

    bool canGetCodeOrigin(unsigned index)
    {
        if (!hasCodeOrigins())
            return false;
        return index < codeOrigins().size();
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        return codeOrigins()[index];
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(jitType()));
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.add(locker, site);
    }

    bool hasExitSite(const ConcurrentJITLocker& locker, const DFG::FrequentExitSite& site) const
    {
        return m_exitProfile.hasExitSite(locker, site);
    }
    bool hasExitSite(const DFG::FrequentExitSite& site) const
    {
        ConcurrentJITLocker locker(m_lock);
        return hasExitSite(locker, site);
    }
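
    // Illustrative sketch (not part of this header): recording an exit site
    // against the baseline code block lets the next optimized compile avoid
    // the speculation that failed:
    //
    //     baselineCodeBlock->addFrequentExitSite(
    //         DFG::FrequentExitSite(bytecodeOffset, exitKind)); // hypothetical arguments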

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif // ENABLE(DFG_JIT)

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const
    {
        if (!JITCode::isOptimizingJIT(jitType()))
            return 0;

        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
    }

    const Identifier& identifier(int index) const
    {
        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
            return m_unlinkedCode->identifier(index);
        ASSERT(JITCode::isOptimizingJIT(jitType()));
        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
    }
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        return result;
    }

    bool findConstant(JSValue, unsigned& result);
    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
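
    // Illustrative note (not part of this header): constants live in a
    // separate pool addressed at or above FirstConstantRegisterIndex, so an
    // operand maps to the pool by subtraction. For example, if
    // FirstConstantRegisterIndex were 0x40000000, then operand 0x40000002
    // would name m_constantRegisters[2].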

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }

    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }

    Heap* heap() const { return m_heap; }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

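    // The liveness analysis below is computed lazily under double-checked
    // locking: a locked fast check for an existing analysis, an unlocked (and
    // possibly wasted) construction, then a second locked check before
    // installing the result.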
    BytecodeLivenessAnalysis& livenessAnalysis()
    {
        {
            ConcurrentJITLocker locker(m_lock);
            if (!!m_livenessAnalysis)
                return *m_livenessAnalysis;
        }
        std::unique_ptr<BytecodeLivenessAnalysis> analysis =
            std::make_unique<BytecodeLivenessAnalysis>(this);
        {
            ConcurrentJITLocker locker(m_lock);
            if (!m_livenessAnalysis)
                m_livenessAnalysis = WTF::move(analysis);
            return *m_livenessAnalysis;
        }
    }

    void validate();

    // Jump Tables

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

    SymbolTable* symbolTable() const { return m_symbolTable.get(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);
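
    // Illustrative sketch (not part of this header): bytecode linking shrinks
    // early, before machine code can point into the vectors; a JIT shrinks
    // late, after code generation:
    //
    //     codeBlock->shrinkToFit(CodeBlock::EarlyShrink); // during linking
    //     codeBlock->shrinkToFit(CodeBlock::LateShrink);  // after code generation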

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const BaselineExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
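
    // Illustrative sketch (not part of this header): the LLInt's prologue and
    // loop back edges bump the counter and ask whether to tier up:
    //
    //     if (codeBlock->checkIfJITThresholdReached())
    //         compileWithBaselineJIT(codeBlock); // hypothetical helper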

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.
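
    // Illustrative sketch (not part of this header): a baseline loop back
    // edge adds to m_jitExecuteCounter and triggers when it crosses zero:
    //
    //     if (codeBlock->checkIfOptimizationThresholdReached())
    //         attemptOSREntryOrCompile(codeBlock, bytecodeOffset); // hypothetical helper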

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); }

    const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);

    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();

    unsigned frameRegisterCount();
    int stackPointerOffset();

    bool hasOpDebugForLineAndColumn(unsigned line, unsigned column);

    bool hasDebuggerRequests() const { return m_debuggerRequests; }
    void* debuggerRequestsAddress() { return &m_debuggerRequests; }

    void addBreakpoint(unsigned numBreakpoints);
    void removeBreakpoint(unsigned numBreakpoints)
    {
        ASSERT(m_numBreakpoints >= numBreakpoints);
        m_numBreakpoints -= numBreakpoints;
    }

    enum SteppingMode {
        SteppingModeDisabled,
        SteppingModeEnabled
    };
    void setSteppingMode(SteppingMode);

    void clearDebuggerRequests()
    {
        m_steppingMode = SteppingModeDisabled;
        m_numBreakpoints = 0;
    }

    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    int m_numVars;
    bool m_isConstructor : 1;

    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJITLock m_lock;
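
    // Illustrative sketch (not part of this header): a concurrent compilation
    // thread querying the inline caches must hold this lock, per the rules above:
    //
    //     {
    //         ConcurrentJITLocker locker(codeBlock->m_lock);
    //         StubInfoMap stubInfos;
    //         codeBlock->getStubInfoMap(locker, stubInfos);
    //     }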

    bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it.
    bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC.

    bool m_didFailFTLCompilation : 1;
    bool m_hasBeenCompiledWithFTL : 1;

    // Internal methods for use by validation code. They would be private if it
    // weren't for the fact that we use them from anonymous namespaces.
    void beginValidationDidFail();
    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();

    bool isKnownToBeLiveDuringGC(); // Will only return valid results when called during GC. Assumes that you've already established that the owner executable is live.

protected:
    virtual void visitWeakReferences(SlotVisitor&) override;
    virtual void finalizeUnconditionally() override;

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class CodeBlockSet;

    CodeBlock* specialOSREntryBlockOrNull();

    void noticeIncomingCall(ExecState* callerFrame);

    double optimizationThresholdScalingFactor();

#if ENABLE(JIT)
    ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
#endif

    void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);

    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
    {
        size_t count = constants.size();
        m_constantRegisters.resize(count);
        for (size_t i = 0; i < count; i++)
            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
    }

    void dumpBytecode(
        PrintStream&, ExecState*, const Instruction* begin, const Instruction*&,
        const StubInfoMap& = StubInfoMap(), const CallLinkInfoMap& = CallLinkInfoMap());

    CString registerName(int r) const;
    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
    enum CacheDumpMode { DumpCaches, DontDumpCaches };
    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap&);
    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printLocationAndOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printLocationOpAndRegisterOperand(PrintStream&, ExecState*, int location, const Instruction*& it, const char* op, int operand);

    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);

    bool shouldImmediatelyAssumeLivenessDuringScan();

    void propagateTransitions(SlotVisitor&);
    void determineLiveness(SlotVisitor&);

    void stronglyVisitStrongReferences(SlotVisitor&);
    void stronglyVisitWeakReferences(SlotVisitor&);

    void createRareDataIfNecessary()
    {
        if (!m_rareData)
            m_rareData = adoptPtr(new RareData);
    }

#if ENABLE(JIT)
    void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
    void resetStubDuringGCInternal(RepatchBuffer&, StructureStubInfo&);
#endif
    WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode;
    int m_numParameters;
    union {
        unsigned m_debuggerRequests;
        struct {
            unsigned m_hasDebuggerStatement : 1;
            unsigned m_steppingMode : 1;
            unsigned m_numBreakpoints : 30;
        };
    };
    WriteBarrier<ScriptExecutable> m_ownerExecutable;
    VM* m_vm;

    RefCountedArray<Instruction> m_instructions;
    WriteBarrier<SymbolTable> m_symbolTable;
    VirtualRegister m_thisRegister;
    VirtualRegister m_argumentsRegister;
    VirtualRegister m_activationRegister;

    bool m_isStrictMode;
    bool m_needsActivation;
    bool m_mayBeExecuting;
    uint8_t m_visitAggregateHasBeenCalled;

    RefPtr<SourceProvider> m_source;
    unsigned m_sourceOffset;
    unsigned m_firstLineColumnOffset;
    unsigned m_codeType;

    Vector<LLIntCallLinkInfo> m_llintCallLinkInfos;
    SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls;
    RefPtr<JITCode> m_jitCode;
#if ENABLE(JIT)
    Bag<StructureStubInfo> m_stubInfos;
    Vector<ByValInfo> m_byValInfos;
    Bag<CallLinkInfo> m_callLinkInfos;
    SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
#endif
    OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#if ENABLE(DFG_JIT)
    // This is relevant to non-DFG code blocks that serve as the profiled code block
    // for DFG code blocks.
    DFG::ExitProfile m_exitProfile;
    CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
    Vector<ValueProfile> m_argumentValueProfiles;
    Vector<ValueProfile> m_valueProfiles;
    SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
    SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
    Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
    ArrayProfileVector m_arrayProfiles;
    Vector<ObjectAllocationProfile> m_objectAllocationProfiles;

    // Constant Pool
    COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
    // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates
    // it, so we're stuck with it for now.
    Vector<WriteBarrier<Unknown>> m_constantRegisters;
    Vector<WriteBarrier<FunctionExecutable>> m_functionDecls;
    Vector<WriteBarrier<FunctionExecutable>> m_functionExprs;

    RefPtr<CodeBlock> m_alternative;

    BaselineExecutionCounter m_llintExecuteCounter;

    BaselineExecutionCounter m_jitExecuteCounter;
    int32_t m_totalJITExecutions;
    uint32_t m_osrExitCounter;
    uint16_t m_optimizationDelayCounter;
    uint16_t m_reoptimizationRetryCounter;

    mutable CodeBlockHash m_hash;

    std::unique_ptr<BytecodeLivenessAnalysis> m_livenessAnalysis;

    struct RareData {
        WTF_MAKE_FAST_ALLOCATED;
    public:
        Vector<HandlerInfo> m_exceptionHandlers;

        // Buffers used for large array literals
        Vector<Vector<JSValue>> m_constantBuffers;

        // Jump Tables
        Vector<SimpleJumpTable> m_switchJumpTables;
        Vector<StringJumpTable> m_stringSwitchJumpTables;

        EvalCodeCache m_evalCodeCache;
    };
#if COMPILER(MSVC)
    friend void WTF::deleteOwnedPtr<RareData>(RareData*);
#endif
    OwnPtr<RareData> m_rareData;
#if ENABLE(JIT)
    DFG::CapabilityLevel m_capabilityLevelState;
#endif
};

// Program code is not marked by any function, so we make the global object
// responsible for marking it.

class GlobalCodeBlock : public CodeBlock {
protected:
    GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
    {
    }
};

class ProgramCodeBlock : public GlobalCodeBlock {
public:
    ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned firstLineColumnOffset)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, firstLineColumnOffset)
    {
    }

#if ENABLE(JIT)
protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif
};

class EvalCodeBlock : public GlobalCodeBlock {
public:
    EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
        : GlobalCodeBlock(CopyParsedBlock, other)
    {
    }

    EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider)
        : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, 0, 1)
    {
    }

    const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); }
    unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); }

#if ENABLE(JIT)
protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif

private:
    UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); }
};

class FunctionCodeBlock : public CodeBlock {
public:
    FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
        : CodeBlock(CopyParsedBlock, other)
    {
    }

    FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
        : CodeBlock(ownerExecutable, unlinkedCodeBlock, scope, sourceProvider, sourceOffset, firstLineColumnOffset)
    {
    }

#if ENABLE(JIT)
protected:
    virtual CodeBlock* replacement() override;
    virtual DFG::CapabilityLevel capabilityLevelInternal() override;
#endif
};

inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
{
    RELEASE_ASSERT(inlineCallFrame);
    ExecutableBase* executable = inlineCallFrame->executable.get();
    RELEASE_ASSERT(executable->structure()->classInfo() == FunctionExecutable::info());
    return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
}

inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
{
    if (codeOrigin.inlineCallFrame)
        return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    return baselineCodeBlock;
}

inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
{
    if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
        return CallFrame::argumentOffset(argument);

    const SlowArgument* slowArguments = symbolTable()->slowArguments();
    if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
        return CallFrame::argumentOffset(argument);

    ASSERT(slowArguments[argument].status == SlowArgument::Captured);
    return slowArguments[argument].index;
}

inline bool CodeBlock::hasSlowArguments()
{
    return !!symbolTable()->slowArguments();
}

inline Register& ExecState::r(int index)
{
    CodeBlock* codeBlock = this->codeBlock();
    if (codeBlock->isConstantRegisterIndex(index))
        return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
    return this[index];
}

inline Register& ExecState::uncheckedR(int index)
{
    RELEASE_ASSERT(index < FirstConstantRegisterIndex);
    return this[index];
}

inline JSValue ExecState::argumentAfterCapture(size_t argument)
{
    if (argument >= argumentCount())
        return jsUndefined();

    if (!codeBlock())
        return this[argumentOffset(argument)].jsValue();

    return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
}

inline void CodeBlockSet::mark(void* candidateCodeBlock)
{
    // We have to check for 0 and -1 because those are used by the HashMap as markers.
    uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);

    // This checks for both of those nasty cases in one go.
    // 0 + 1 = 1
    // -1 + 1 = 0
    if (value + 1 <= 1)
        return;

    CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
    if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
        return;

    mark(codeBlock);
}

inline void CodeBlockSet::mark(CodeBlock* codeBlock)
{
    if (!codeBlock)
        return;

    if (codeBlock->m_mayBeExecuting)
        return;

    codeBlock->m_mayBeExecuting = true;
    // We might not have cleared the marks for this CodeBlock, but we need to visit it.
    codeBlock->m_visitAggregateHasBeenCalled = false;
#if ENABLE(GGC)
    m_currentlyExecuting.append(codeBlock);
#endif
}

template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
{
    switch (type()) {
    case ProgramExecutableType: {
        if (CodeBlock* codeBlock = jsCast<ProgramExecutable*>(this)->m_programCodeBlock.get())
            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
        break;
    }

    case EvalExecutableType: {
        if (CodeBlock* codeBlock = jsCast<EvalExecutable*>(this)->m_evalCodeBlock.get())
            codeBlock->forEachRelatedCodeBlock(std::forward<Functor>(functor));
        break;
    }

    case FunctionExecutableType: {
        Functor f(std::forward<Functor>(functor));
        FunctionExecutable* executable = jsCast<FunctionExecutable*>(this);
        if (CodeBlock* codeBlock = executable->m_codeBlockForCall.get())
            codeBlock->forEachRelatedCodeBlock(f);
        if (CodeBlock* codeBlock = executable->m_codeBlockForConstruct.get())
            codeBlock->forEachRelatedCodeBlock(f);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

} // namespace JSC

#endif // CodeBlock_h