+ int32_t jitExecuteCounter() const { return m_jitExecuteCounter.m_counter; }
+
+ unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
+
+ // Check if the optimization threshold has been reached, and if not,
+ // adjust the heuristics accordingly. Returns true if the threshold has
+ // been reached.
+ bool checkIfOptimizationThresholdReached()
+ {
+ return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
+ }
+
+ // Call this to force the next optimization trigger to fire. This is
+ // rarely wise, since optimization triggers are typically more
+ // expensive than executing baseline code.
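+ // With a threshold of zero, the next checkIfOptimizationThresholdReached()
+ // call effectively reports the threshold as already crossed.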
+ void optimizeNextInvocation()
+ {
+ m_jitExecuteCounter.setNewThreshold(0, this);
+ }
+
+ // Call this to prevent optimization from happening again. Note that
+ // optimization will still happen after roughly 2^29 invocations,
+ // so this is really meant to delay that as much as possible. This
+ // is called if optimization failed, and we expect it to fail in
+ // the future as well.
+ void dontOptimizeAnytimeSoon()
+ {
+ m_jitExecuteCounter.deferIndefinitely();
+ }
+
+ // Call this to reinitialize the counter to its starting state,
+ // forcing a warm-up to happen before the next optimization trigger
+ // fires. This is called in the CodeBlock constructor. It also
+ // makes sense to call this if an OSR exit occurred. Note that
+ // OSR exit code is generated code, so the value of the execute
+ // counter that this corresponds to is also available directly.
+ void optimizeAfterWarmUp()
+ {
+ m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
+ }
+
+ // Call this to force an optimization trigger to fire only after
+ // a lot of warm-up.
+ void optimizeAfterLongWarmUp()
+ {
+ m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
+ }
+
+ // Call this to cause an optimization trigger to fire soon, but
+ // not necessarily the next one. This makes sense if optimization
+ // succeeds. Successful optimization means that all calls are
+ // relinked to the optimized code, so this only affects call
+ // frames that are still executing this CodeBlock. The value here
+ // is tuned to strike a balance between the cost of OSR entry
+ // (which is too high to warrant making every loop back edge
+ // trigger OSR immediately) and the cost of executing baseline
+ // code (which is high enough that we don't necessarily want to
+ // have a full warm-up). The intuition for calling this instead of
+ // optimizeNextInvocation() is for the case of recursive functions
+ // with loops. Consider that there may be N call frames of some
+ // recursive function, for a reasonably large value of N. The top
+ // one triggers optimization, and then returns, and then all of
+ // the others return. We don't want optimization to be triggered on
+ // each return, as that would be superfluous. It only makes sense
+ // to trigger optimization if one of those functions becomes hot
+ // in the baseline code.
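+ // Each reoptimization retry doubles the effective threshold, since
+ // the base value is shifted left by the retry count; for example,
+ // with a hypothetical base of 100 and two prior retries, the trigger
+ // fires after 100 << 2 = 400 more executions.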
+ void optimizeSoon()
+ {
+ m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this);
+ }
+
+ // The speculative JIT tracks its success rate, so that we can
+ // decide when to reoptimize. It's interesting to note that these
+ // counters may overflow without any protection. The success
+ // counter will overflow before the fail one does, because the
+ // fail one is used as a trigger to reoptimize. So the worst case
+ // is that the success counter overflows and we reoptimize without
+ // needing to. But this is harmless. If a method really did
+ // execute 2^32 times then compiling it again probably won't hurt
+ // anyone.
+
+ void countSpeculationSuccess()
+ {
+ m_speculativeSuccessCounter++;
+ }
+
+ void countSpeculationFailure()
+ {
+ m_speculativeFailCounter++;
+ }
+
+ uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
+ uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
+ uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; }
+
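+ // Expose the counters' raw addresses and object offsets so that
+ // JIT-generated code can update and test them directly.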
+ uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
+ uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
+ uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; }
+
+ static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
+ static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
+ static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); }
+
+#if ENABLE(JIT)
+ // The number of failures that must accumulate before the success/fail
+ // ratio check can trigger reoptimization.
+ unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
+ unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }
+
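+ // We reoptimize when speculation failures reach 1/ratio of the
+ // successes and the absolute failure count is itself large, or when
+ // enough forced OSR exits have accumulated. For example, assuming a
+ // hypothetical ratio of 6 and a large-fail threshold of 100: with
+ // 5000 successes and 900 failures, 6 * 900 = 5400 >= 5000 and
+ // 900 >= 100, so we would reoptimize.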
+ bool shouldReoptimizeNow()
+ {
+ return (Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter()
+ && speculativeFailCounter() >= largeFailCountThreshold())
+ || forcedOSRExitCounter() >= Options::forcedOSRExitCountForReoptimization;
+ }
+
+ bool shouldReoptimizeFromLoopNow()
+ {
+ return (Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter()
+ && speculativeFailCounter() >= largeFailCountThresholdForLoop())
+ || forcedOSRExitCounter() >= Options::forcedOSRExitCountForReoptimization;
+ }
+#endif
+
+#if ENABLE(VALUE_PROFILER)
+ bool shouldOptimizeNow();
+#else
+ bool shouldOptimizeNow() { return false; }
+#endif
+
+#if ENABLE(JIT)
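+ // Discard the optimized replacement: record its frequent exit sites,
+ // jettison it, count the reoptimization (which raises future
+ // optimization thresholds), and require a fresh warm-up before the
+ // next optimization attempt.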
+ void reoptimize()
+ {
+ ASSERT(replacement() != this);
+ ASSERT(replacement()->alternative() == this);
+ replacement()->tallyFrequentExitSites();
+ replacement()->jettison();
+ countReoptimization();
+ optimizeAfterWarmUp();
+ }
+#endif
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+ void dumpValueProfiles();
+#endif
+