diff --git a/osfmk/i386/i386_timer.c b/osfmk/i386/i386_timer.c
index ae6eb1029bb129bcf870e17f28bec1e533a53a2b..1d56ea08efda88d59d9f80e992b95a89618f0e86 100644
--- a/osfmk/i386/i386_timer.c
+++ b/osfmk/i386/i386_timer.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -59,7 +59,7 @@
 uint32_t spurious_timers;
 
 /*
- *     Event timer interrupt.
+ *      Event timer interrupt.
  *
  * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
  *     that occur before the entire chain completes.
@@ -67,35 +67,35 @@ uint32_t spurious_timers;
  * XXX a better implementation would use a set of generic callouts and iterate over them
  */
 void
-timer_intr(int         user_mode,
-           uint64_t    rip)
+timer_intr(int          user_mode,
+    uint64_t    rip)
 {
-       uint64_t                abstime;
-       rtclock_timer_t         *mytimer;
-       cpu_data_t              *pp;
-       int64_t                 latency;
-       uint64_t                pmdeadline;
-       boolean_t               timer_processed = FALSE;
+       uint64_t                abstime;
+       rtclock_timer_t         *mytimer;
+       cpu_data_t              *pp;
+       int64_t                 latency;
+       uint64_t                pmdeadline;
+       boolean_t               timer_processed = FALSE;
 
        pp = current_cpu_datap();
 
        SCHED_STATS_TIMER_POP(current_processor());
 
-       abstime = mach_absolute_time();         /* Get the time now */
+       abstime = mach_absolute_time();         /* Get the time now */
 
        /* has a pending clock timer expired? */
-       mytimer = &pp->rtclock_timer;           /* Point to the event timer */
+       mytimer = &pp->rtclock_timer;           /* Point to the event timer */
 
        if ((timer_processed = ((mytimer->deadline <= abstime) ||
-                   (abstime >= (mytimer->queue.earliest_soft_deadline))))) {
+           (abstime >= (mytimer->queue.earliest_soft_deadline))))) {
                /*
                 * Log interrupt service latency (-ve value expected by tool)
                 * a non-PM event is expected next.
-                * The requested deadline may be earlier than when it was set 
+                * The requested deadline may be earlier than when it was set
                 * - use MAX to avoid reporting bogus latencies.
                 */
                latency = (int64_t) (abstime - MAX(mytimer->deadline,
-                                                  mytimer->when_set));
+                   mytimer->when_set));
                /* Log zero timer latencies when opportunistically processing
                 * coalesced timers.
                 */
@@ -105,12 +105,12 @@ timer_intr(int            user_mode,
                }
 
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
-                       DECR_TRAP_LATENCY | DBG_FUNC_NONE,
-                       -latency,
-                       ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
-                       user_mode, 0, 0);
+                   DECR_TRAP_LATENCY | DBG_FUNC_NONE,
+                   -latency,
+                   ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
+                   user_mode, 0, 0);
 
-               mytimer->has_expired = TRUE;    /* Remember that we popped */
+               mytimer->has_expired = TRUE;    /* Remember that we popped */
                mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
                mytimer->has_expired = FALSE;
 
@@ -122,37 +122,47 @@ timer_intr(int            user_mode,
        /* is it time for power management state change? */
        if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
-                       DECR_PM_DEADLINE | DBG_FUNC_START,
-                       0, 0, 0, 0, 0);
+                   DECR_PM_DEADLINE | DBG_FUNC_START,
+                   0, 0, 0, 0, 0);
                pmCPUDeadline(pp);
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
-                       DECR_PM_DEADLINE | DBG_FUNC_END,
-                       0, 0, 0, 0, 0);
+                   DECR_PM_DEADLINE | DBG_FUNC_END,
+                   0, 0, 0, 0, 0);
                timer_processed = TRUE;
+               abstime = mach_absolute_time(); /* Get the time again since we ran a bit */
+       }
+
+       uint64_t quantum_deadline = pp->quantum_timer_deadline;
+       /* is it the quantum timer expiration? */
+       if ((quantum_deadline <= abstime) && (quantum_deadline > 0)) {
+               pp->quantum_timer_deadline = 0;
+               quantum_timer_expire(abstime);
        }
 
        /* schedule our next deadline */
        x86_lcpu()->rtcDeadline = EndOfAllTime;
        timer_resync_deadlines();
 
-       if (__improbable(timer_processed == FALSE))
+       if (__improbable(timer_processed == FALSE)) {
                spurious_timers++;
+       }
 }
 
 /*
  * Set the clock deadline.
  */
-void timer_set_deadline(uint64_t deadline)
+void
+timer_set_deadline(uint64_t deadline)
 {
-       rtclock_timer_t         *mytimer;
-       spl_t                   s;
-       cpu_data_t              *pp;
+       rtclock_timer_t         *mytimer;
+       spl_t                   s;
+       cpu_data_t              *pp;
 
-       s = splclock();                         /* no interruptions */
+       s = splclock();                         /* no interruptions */
        pp = current_cpu_datap();
 
-       mytimer = &pp->rtclock_timer;           /* Point to the timer itself */
-       mytimer->deadline = deadline;           /* Set new expiration time */
+       mytimer = &pp->rtclock_timer;           /* Point to the timer itself */
+       mytimer->deadline = deadline;           /* Set new expiration time */
        mytimer->when_set = mach_absolute_time();
 
        timer_resync_deadlines();
@@ -160,6 +170,18 @@ void timer_set_deadline(uint64_t deadline)
        splx(s);
 }
 
+void
+quantum_timer_set_deadline(uint64_t deadline)
+{
+       cpu_data_t              *pp;
+       /* We should've only come into this path with interrupts disabled */
+       assert(ml_get_interrupts_enabled() == FALSE);
+
+       pp = current_cpu_datap();
+       pp->quantum_timer_deadline = deadline;
+       timer_resync_deadlines();
+}
+
 /*
  * Re-evaluate the outstanding deadlines and select the most proximate.
  *
@@ -168,32 +190,44 @@ void timer_set_deadline(uint64_t deadline)
 void
 timer_resync_deadlines(void)
 {
-       uint64_t                deadline = EndOfAllTime;
-       uint64_t                pmdeadline;
-       rtclock_timer_t         *mytimer;
-       spl_t                   s = splclock();
-       cpu_data_t              *pp;
-       uint32_t                decr;
+       uint64_t                deadline = EndOfAllTime;
+       uint64_t                pmdeadline;
+       uint64_t                quantum_deadline;
+       rtclock_timer_t         *mytimer;
+       spl_t                   s = splclock();
+       cpu_data_t              *pp;
+       uint32_t                decr;
 
        pp = current_cpu_datap();
-       if (!pp->cpu_running)
+       if (!pp->cpu_running) {
                /* There's really nothing to do if this processor is down */
                return;
+       }
 
        /*
         * If we have a clock timer set, pick that.
         */
        mytimer = &pp->rtclock_timer;
        if (!mytimer->has_expired &&
-           0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
+           0 < mytimer->deadline && mytimer->deadline < EndOfAllTime) {
                deadline = mytimer->deadline;
+       }
 
        /*
         * If we have a power management deadline, see if that's earlier.
         */
        pmdeadline = pmCPUGetDeadline(pp);
-       if (0 < pmdeadline && pmdeadline < deadline)
+       if (0 < pmdeadline && pmdeadline < deadline) {
                deadline = pmdeadline;
+       }
+
+       /* If we have the quantum timer setup, check that */
+       quantum_deadline = pp->quantum_timer_deadline;
+       if ((quantum_deadline > 0) &&
+           (quantum_deadline < deadline)) {
+               deadline = quantum_deadline;
+       }
+
 
        /*
         * Go and set the "pop" event.
@@ -202,22 +236,30 @@ timer_resync_deadlines(void)
 
        /* Record non-PM deadline for latency tool */
        if (decr != 0 && deadline != pmdeadline) {
+               uint64_t queue_count = 0;
+               if (deadline != quantum_deadline) {
+                       /*
+                        * For non-quantum timer put the queue count
+                        * in the tracepoint.
+                        */
+                       queue_count = mytimer->queue.count;
+               }
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
-                       DECR_SET_DEADLINE | DBG_FUNC_NONE,
-                       decr, 2,
-                       deadline,
-                       mytimer->queue.count, 0);
+                   DECR_SET_DEADLINE | DBG_FUNC_NONE,
+                   decr, 2,
+                   deadline,
+                   queue_count, 0);
        }
        splx(s);
 }
 
 void
 timer_queue_expire_local(
-__unused void                  *arg)
+       __unused void                   *arg)
 {
-       rtclock_timer_t         *mytimer;
-       uint64_t                        abstime;
-       cpu_data_t                      *pp;
+       rtclock_timer_t         *mytimer;
+       uint64_t                        abstime;
+       cpu_data_t                      *pp;
 
        pp = current_cpu_datap();
 
@@ -234,11 +276,11 @@ __unused void                     *arg)
 
 void
 timer_queue_expire_rescan(
-__unused void                  *arg)
+       __unused void                   *arg)
 {
-       rtclock_timer_t         *mytimer;
-       uint64_t                abstime;
-       cpu_data_t              *pp;
+       rtclock_timer_t         *mytimer;
+       uint64_t                abstime;
+       cpu_data_t              *pp;
 
        assert(ml_get_interrupts_enabled() == FALSE);
        pp = current_cpu_datap();
@@ -264,11 +306,13 @@ int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl;
 #endif
 
 boolean_t
-timer_resort_threshold(uint64_t skew) {
-       if (skew >= TIMER_RESORT_THRESHOLD_ABSTIME)
+timer_resort_threshold(uint64_t skew)
+{
+       if (skew >= TIMER_RESORT_THRESHOLD_ABSTIME) {
                return TRUE;
-       else
+       } else {
                return FALSE;
+       }
 }
 
 /*
@@ -277,33 +321,35 @@ timer_resort_threshold(uint64_t skew) {
  */
 mpqueue_head_t *
 timer_queue_assign(
-    uint64_t        deadline)
+       uint64_t        deadline)
 {
-       cpu_data_t              *cdp = current_cpu_datap();
-       mpqueue_head_t          *queue;
+       cpu_data_t              *cdp = current_cpu_datap();
+       mpqueue_head_t          *queue;
 
        if (cdp->cpu_running) {
                queue = &cdp->rtclock_timer.queue;
 
-               if (deadline < cdp->rtclock_timer.deadline)
+               if (deadline < cdp->rtclock_timer.deadline) {
                        timer_set_deadline(deadline);
-       }
-       else
+               }
+       } else {
                queue = &cpu_datap(master_cpu)->rtclock_timer.queue;
+       }
 
-    return (queue);
+       return queue;
 }
 
 void
 timer_queue_cancel(
-    mpqueue_head_t  *queue,
-    uint64_t        deadline,
-    uint64_t        new_deadline)
+       mpqueue_head_t  *queue,
+       uint64_t        deadline,
+       uint64_t        new_deadline)
 {
-    if (queue == &current_cpu_datap()->rtclock_timer.queue) {
-        if (deadline < new_deadline)
-            timer_set_deadline(new_deadline);
-    }
+       if (queue == &current_cpu_datap()->rtclock_timer.queue) {
+               if (deadline < new_deadline) {
+                       timer_set_deadline(new_deadline);
+               }
+       }
 }
 
 /*
@@ -317,19 +363,19 @@ timer_queue_cancel(
 uint32_t
 timer_queue_migrate_cpu(int target_cpu)
 {
-       cpu_data_t      *target_cdp = cpu_datap(target_cpu);
-       cpu_data_t      *cdp = current_cpu_datap();
-       int             ntimers_moved;
+       cpu_data_t      *target_cdp = cpu_datap(target_cpu);
+       cpu_data_t      *cdp = current_cpu_datap();
+       int             ntimers_moved;
 
        assert(!ml_get_interrupts_enabled());
        assert(target_cpu != cdp->cpu_number);
        assert(target_cpu == master_cpu);
 
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
-               DECR_TIMER_MIGRATE | DBG_FUNC_START,
-               target_cpu,
-               cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >>32),
-               0, 0);
+           DECR_TIMER_MIGRATE | DBG_FUNC_START,
+           target_cpu,
+           cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32),
+           0, 0);
 
        /*
         * Move timer requests from the local queue to the target processor's.
@@ -339,7 +385,7 @@ timer_queue_migrate_cpu(int target_cpu)
         * resync, the move of this and all later requests is aborted.
         */
        ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
-                                           &target_cdp->rtclock_timer.queue);
+           &target_cdp->rtclock_timer.queue);
 
        /*
         * Assuming we moved stuff, clear local deadline.
@@ -348,10 +394,10 @@ timer_queue_migrate_cpu(int target_cpu)
                cdp->rtclock_timer.deadline = EndOfAllTime;
                setPop(EndOfAllTime);
        }
+
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
-               DECR_TIMER_MIGRATE | DBG_FUNC_END,
-               target_cpu, ntimers_moved, 0, 0, 0);
+           DECR_TIMER_MIGRATE | DBG_FUNC_END,
+           target_cpu, ntimers_moved, 0, 0, 0);
 
        return ntimers_moved;
 }
@@ -392,12 +438,13 @@ static timer_coalescing_priority_params_ns_t tcoal_prio_params_init =
        .timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC,
        .timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC,
        .latency_qos_scale = {3, 2, 1, -2, -15, -15},
-       .latency_qos_ns_max ={1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
-                             75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC},
+       .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
+                              75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC},
        .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE},
 };
 
-timer_coalescing_priority_params_ns_t * timer_call_get_priority_params(void)
+timer_coalescing_priority_params_ns_t *
+timer_call_get_priority_params(void)
 {
        return &tcoal_prio_params_init;
 }