diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c
index d88ade25df761f5541508cde9bc8a669ec054396..a74ff74d42655dfd4f25f0c3866589f56c4d1df9 100644
--- a/osfmk/kern/priority.c
+++ b/osfmk/kern/priority.c
@@ -1,23 +1,31 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- * 
- * @APPLE_LICENSE_HEADER_END@
+ * This file contains Original Code and/or Modifications of Original Code 
+ * as defined in and that are subject to the Apple Public Source License 
+ * Version 2.0 (the 'License'). You may not use this file except in 
+ * compliance with the License.  The rights granted to you under the 
+ * License may not be used to create, or enable the creation or 
+ * redistribution of, unlawful or unlicensed copies of an Apple operating 
+ * system, or to circumvent, violate, or enable the circumvention or 
+ * violation of, any terms of an Apple operating system software license 
+ * agreement.
+ *
+ * Please obtain a copy of the License at 
+ * http://www.opensource.apple.com/apsl/ and read it before using this 
+ * file.
+ *
+ * The Original Code and all software distributed under the License are 
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and 
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -57,8 +65,6 @@
  *     Clock primitives.
  */
 
-#include <cpus.h>
-
 #include <mach/boolean.h>
 #include <mach/kern_return.h>
 #include <mach/machine.h>
 #include <kern/thread.h>
 #include <kern/processor.h>
 #include <machine/machparam.h>
-#include <kern/sf.h>
-#include <kern/mk_sp.h>        /*** ??? fix so this can be removed ***/
-/*** ??? Should this file be MK SP-specific?  Or is it more general purpose? ***/
-
-
 
 /*
- *     USAGE_THRESHOLD is the amount by which usage must change to
- *     cause a priority shift that moves a thread between run queues.
- */
-
-#ifdef PRI_SHIFT_2
-#if    PRI_SHIFT_2 > 0
-#define        USAGE_THRESHOLD (((1 << PRI_SHIFT) + (1 << PRI_SHIFT_2)) << (2 + SCHED_SHIFT))
-#else  /* PRI_SHIFT_2 > 0 */
-#define        USAGE_THRESHOLD (((1 << PRI_SHIFT) - (1 << -(PRI_SHIFT_2))) << (2 + SCHED_SHIFT))
-#endif /* PRI_SHIFT_2 > 0 */
-#else  /* PRI_SHIFT_2 */
-#define USAGE_THRESHOLD        (1 << (PRI_SHIFT + 2 + SCHED_SHIFT))
-#endif /* PRI_SHIFT_2 */
-
-/*
- *     thread_quantum_update:
+ *     thread_quantum_expire:
  *
  *     Recalculate the quantum and priority for a thread.
- *     The number of ticks that has elapsed since we were last called
- *     is passed as "nticks."
  */
 
 void
-thread_quantum_update(
-       register int            mycpu,
-       register thread_t       thread,
-       int                                     nticks,
-       int                                     state)
+thread_quantum_expire(
+       timer_call_param_t      p0,
+       timer_call_param_t      p1)
 {
-       register int                            quantum;
-       register processor_t            myprocessor;
-       register processor_set_t        pset;
+       register processor_t            myprocessor = p0;
+       register thread_t                       thread = p1;
        spl_t                                           s;
 
-       myprocessor = cpu_to_processor(mycpu);
-       pset = myprocessor->processor_set;
+       s = splsched();
+       thread_lock(thread);
 
        /*
-        *      Account for thread's utilization of these ticks.
-        *      This assumes that there is *always* a current thread.
-        *      When the processor is idle, it should be the idle thread.
+        *      Check for fail-safe trip.
         */
+       if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
+               uint64_t                        new_computation;
 
-       /*
-        *      Update set_quantum and calculate the current quantum.
-        */
-       pset->set_quantum = pset->machine_quantum[
-                                                       (pset->runq.count > pset->processor_count) ?
-                                                                 pset->processor_count : pset->runq.count];
+               new_computation = myprocessor->quantum_end;
+               new_computation -= thread->computation_epoch;
+               if (new_computation + thread->computation_metered >
+                                                                                       max_unsafe_computation) {
 
-       if (myprocessor->runq.count != 0)
-               quantum = min_quantum;
-       else
-               quantum = pset->set_quantum;
+                       if (thread->sched_mode & TH_MODE_REALTIME) {
+                               thread->priority = DEPRESSPRI;
+
+                               thread->safe_mode |= TH_MODE_REALTIME;
+                               thread->sched_mode &= ~TH_MODE_REALTIME;
+                       }
+
+                       pset_share_incr(thread->processor_set);
+
+                       thread->safe_release = sched_tick + sched_safe_duration;
+                       thread->sched_mode |= (TH_MODE_FAILSAFE|TH_MODE_TIMESHARE);
+                       thread->sched_mode &= ~TH_MODE_PREEMPT;
+               }
+       }
                
        /*
-        *      Now recompute the priority of the thread if appropriate.
+        *      Recompute scheduled priority if appropriate.
         */
+       if (thread->sched_stamp != sched_tick)
+               update_priority(thread);
+       else
+       if (thread->sched_mode & TH_MODE_TIMESHARE) {
+               register uint32_t       delta;
 
-       {
-               s = splsched();
-               thread_lock(thread);
+               thread_timer_delta(thread, delta);
 
-               if (!(thread->policy & (POLICY_TIMESHARE|POLICY_RR|POLICY_FIFO))) {
-                       thread_unlock(thread);
-                       splx(s);
-                       return;
-               }
+               /*
+                *      Accumulate timesharing usage only
+                *      during contention for processor
+                *      resources.
+                */
+               if (thread->pri_shift < INT8_MAX)
+                       thread->sched_usage += delta;
 
-               if (thread->state&TH_IDLE) {
-                       /* Don't try to time-slice idle threads */
-                       myprocessor->first_quantum = TRUE;
-                       if (thread->sched_stamp != sched_tick)
-                               update_priority(thread);
-                       thread_unlock(thread);
-                       splx(s);
-                       ast_check();
-                       return;
-               }
+               thread->cpu_delta += delta;
 
-               myprocessor->quantum -= nticks;
                /*
-                *      Runtime quantum adjustment.  Use quantum_adj_index
-                *      to avoid synchronizing quantum expirations.
+                * Adjust the scheduled priority if
+                * the thread has not been promoted
+                * and is not depressed.
                 */
-               if (    quantum != myprocessor->last_quantum    &&
-                                       pset->processor_count > 1                                       ) {
-                       myprocessor->last_quantum = quantum;
-                       simple_lock(&pset->quantum_adj_lock);
-                       quantum = min_quantum + (pset->quantum_adj_index *
-                                                                                       (quantum - min_quantum)) / 
-                                                                                               (pset->processor_count - 1);
-                       if (++(pset->quantum_adj_index) >= pset->processor_count)
-                               pset->quantum_adj_index = 0;
-                       simple_unlock(&pset->quantum_adj_lock);
-               }
-               if (myprocessor->quantum <= 0) {
-                       if (thread->sched_stamp != sched_tick)
-                               update_priority(thread);
-                       else
-                       if (    thread->policy == POLICY_TIMESHARE              &&
-                                       thread->depress_priority < 0                            ) {
-                               thread_timer_delta(thread);
-                               thread->sched_usage += thread->sched_delta;
-                               thread->sched_delta = 0;
-                               compute_my_priority(thread);
-                       }
+               if (    !(thread->sched_mode & TH_MODE_PROMOTED)        &&
+                               !(thread->sched_mode & TH_MODE_ISDEPRESSED)             )
+                       compute_my_priority(thread);
+       }
+
+       /*
+        *      This quantum is up, give this thread another.
+        */
+       if (first_timeslice(myprocessor))
+               myprocessor->timeslice--;
+
+       thread_quantum_init(thread);
+       myprocessor->quantum_end += thread->current_quantum;
+       timer_call_enter1(&myprocessor->quantum_timer,
+                                                       thread, myprocessor->quantum_end);
+
+       thread_unlock(thread);
+
+       /*
+        * Check for and schedule ast if needed.
+        */
+       ast_check(myprocessor);
+
+       splx(s);
+}
+
+/*
+ *     Define shifts for simulating (5/8) ** n
+ *
+ *     Shift structures for holding update shifts.  Actual computation
+ *     is  usage = (usage >> shift1) +/- (usage >> abs(shift2))  where the
+ *     +/- is determined by the sign of shift 2.
+ */
+struct shift_data {
+       int     shift1;
+       int     shift2;
+};
+
+#define SCHED_DECAY_TICKS      32
+static struct shift_data       sched_decay_shifts[SCHED_DECAY_TICKS] = {
+       {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
+       {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
+       {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
+       {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
+};
+
+/*
+ *     do_priority_computation:
+ *
+ *     Calculate the timesharing priority based upon usage and load.
+ */
+#define do_priority_computation(thread, pri)                                                   \
+       MACRO_BEGIN                                                                                                                     \
+       (pri) = (thread)->priority              /* start with base priority */          \
+           - ((thread)->sched_usage >> (thread)->pri_shift);                           \
+       if ((pri) < MINPRI_USER)                                                                                        \
+               (pri) = MINPRI_USER;                                                                                    \
+       else                                                                                                                            \
+       if ((pri) > MAXPRI_KERNEL)                                                                                      \
+               (pri) = MAXPRI_KERNEL;                                                                                  \
+       MACRO_END
+
+/*
+ *     set_priority:
+ *
+ *     Set the base priority of the thread
+ *     and reset its scheduled priority.
+ *
+ *     Called with the thread locked.
+ */
+void
+set_priority(
+       register thread_t       thread,
+       register int            priority)
+{
+       thread->priority = priority;
+       compute_priority(thread, FALSE);
+}
+
+/*
+ *     compute_priority:
+ *
+ *     Reset the scheduled priority of the thread
+ *     according to its base priority if the
+ *     thread has not been promoted or depressed.
+ *
+ *     Called with the thread locked.
+ */
+void
+compute_priority(
+       register thread_t       thread,
+       boolean_t                       override_depress)
+{
+       register int            priority;
+
+       if (    !(thread->sched_mode & TH_MODE_PROMOTED)                        &&
+                       (!(thread->sched_mode & TH_MODE_ISDEPRESSED)    ||
+                                override_depress                                                       )               ) {
+               if (thread->sched_mode & TH_MODE_TIMESHARE)
+                       do_priority_computation(thread, priority);
+               else
+                       priority = thread->priority;
+
+               set_sched_pri(thread, priority);
+       }
+}
+
+/*
+ *     compute_my_priority:
+ *
+ *     Reset the scheduled priority for
+ *     a timesharing thread.
+ *
+ *     Only for use on the current thread
+ *     if timesharing and not depressed.
+ *
+ *     Called with the thread locked.
+ */
+void
+compute_my_priority(
+       register thread_t       thread)
+{
+       register int            priority;
+
+       do_priority_computation(thread, priority);
+       assert(thread->runq == RUN_QUEUE_NULL);
+       thread->sched_pri = priority;
+}
+
+/*
+ *     update_priority
+ *
+ *     Perform housekeeping operations driven by scheduler tick.
+ *
+ *     Called with the thread locked.
+ */
+void
+update_priority(
+       register thread_t       thread)
+{
+       register unsigned       ticks;
+       register uint32_t       delta;
+
+       ticks = sched_tick - thread->sched_stamp;
+       assert(ticks != 0);
+       thread->sched_stamp += ticks;
+       thread->pri_shift = thread->processor_set->pri_shift;
+
+       /*
+        *      Gather cpu usage data.
+        */
+       thread_timer_delta(thread, delta);
+       if (ticks < SCHED_DECAY_TICKS) {
+               register struct shift_data      *shiftp;
 
-                       /*
-                        *      This quantum is up, give this thread another.
-                        */
-                       myprocessor->first_quantum = FALSE;
-                       if (thread->policy == POLICY_TIMESHARE)
-                               myprocessor->quantum += quantum;
-                       else
-                               myprocessor->quantum += min_quantum;
-               }
                /*
-                *      Recompute priority if appropriate.
+                *      Accumulate timesharing usage only
+                *      during contention for processor
+                *      resources.
                 */
+               if (thread->pri_shift < INT8_MAX)
+                       thread->sched_usage += delta;
+
+               thread->cpu_usage += delta + thread->cpu_delta;
+               thread->cpu_delta = 0;
+
+               shiftp = &sched_decay_shifts[ticks];
+               if (shiftp->shift2 > 0) {
+                   thread->cpu_usage =
+                                               (thread->cpu_usage >> shiftp->shift1) +
+                                               (thread->cpu_usage >> shiftp->shift2);
+                   thread->sched_usage =
+                                               (thread->sched_usage >> shiftp->shift1) +
+                                               (thread->sched_usage >> shiftp->shift2);
+               }
                else {
-                   if (thread->sched_stamp != sched_tick)
-                               update_priority(thread);
-                   else
-                       if (    thread->policy == POLICY_TIMESHARE              &&
-                                       thread->depress_priority < 0                            ) {
-                               thread_timer_delta(thread);
-                               if (thread->sched_delta >= USAGE_THRESHOLD) {
-                                   thread->sched_usage +=      thread->sched_delta;
-                                   thread->sched_delta = 0;
-                                   compute_my_priority(thread);
-                               }
+                   thread->cpu_usage =
+                                               (thread->cpu_usage >> shiftp->shift1) -
+                                               (thread->cpu_usage >> -(shiftp->shift2));
+                   thread->sched_usage =
+                                               (thread->sched_usage >> shiftp->shift1) -
+                                               (thread->sched_usage >> -(shiftp->shift2));
+               }
+       }
+       else {
+               thread->cpu_usage = thread->cpu_delta = 0;
+               thread->sched_usage = 0;
+       }
+
+       /*
+        *      Check for fail-safe release.
+        */
+       if (    (thread->sched_mode & TH_MODE_FAILSAFE)         &&
+                       thread->sched_stamp >= thread->safe_release             ) {
+               if (!(thread->safe_mode & TH_MODE_TIMESHARE)) {
+                       if (thread->safe_mode & TH_MODE_REALTIME) {
+                               thread->priority = BASEPRI_RTQUEUES;
+
+                               thread->sched_mode |= TH_MODE_REALTIME;
                        }
+
+                       thread->sched_mode &= ~TH_MODE_TIMESHARE;
+
+                       if (thread->state & TH_RUN)
+                               pset_share_decr(thread->processor_set);
+
+                       if (!(thread->sched_mode & TH_MODE_ISDEPRESSED))
+                               set_sched_pri(thread, thread->priority);
                }
 
-               thread_unlock(thread);
-               splx(s);
+               thread->safe_mode = 0;
+               thread->sched_mode &= ~TH_MODE_FAILSAFE;
+       }
+
+       /*
+        *      Recompute scheduled priority if appropriate.
+        */
+       if (    (thread->sched_mode & TH_MODE_TIMESHARE)        &&
+                       !(thread->sched_mode & TH_MODE_PROMOTED)        &&
+                       !(thread->sched_mode & TH_MODE_ISDEPRESSED)             ) {
+               register int            new_pri;
 
-               /*
-                * Check for and schedule ast if needed.
-                */
-               ast_check();
+               do_priority_computation(thread, new_pri);
+               if (new_pri != thread->sched_pri) {
+                       run_queue_t             runq;
+
+                       runq = run_queue_remove(thread);
+                       thread->sched_pri = new_pri;
+                       if (runq != RUN_QUEUE_NULL)
+                               thread_setrun(thread, SCHED_TAILQ);
+               }
        }
 }
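
Background sketch (standalone, not part of the diff): the new sched_decay_shifts table encodes each decay step as usage = (usage >> shift1) +/- (usage >> |shift2|), a two-shift approximation of multiplying accumulated usage by (5/8)^n. The small user-space program below simply replays the table from the diff and prints each result next to the floating-point value it approximates; the function name decay_usage and the sample usage value are illustrative only and do not appear in the kernel source.

/*
 * Standalone illustration, compiled outside the kernel: replay the
 * sched_decay_shifts table and compare each entry against the
 * floating-point (5/8)^n it approximates.  Build with: cc decay.c -lm
 */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

struct shift_data {
	int	shift1;
	int	shift2;
};

#define SCHED_DECAY_TICKS	32
static const struct shift_data	sched_decay_shifts[SCHED_DECAY_TICKS] = {
	{1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7},
	{5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13},
	{11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18},
	{16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}
};

/*
 * Entry n decays an accumulated usage value as if it had been
 * multiplied by (5/8)^n:  usage >> shift1  +/-  usage >> |shift2|.
 */
static uint32_t
decay_usage(uint32_t usage, int n)
{
	const struct shift_data	*shiftp = &sched_decay_shifts[n];

	if (shiftp->shift2 > 0)
		return ((usage >> shiftp->shift1) + (usage >> shiftp->shift2));
	else
		return ((usage >> shiftp->shift1) - (usage >> -(shiftp->shift2)));
}

int
main(void)
{
	uint32_t	usage = 1 << 20;	/* arbitrary sample usage */

	for (int n = 0; n < SCHED_DECAY_TICKS; n++)
		printf("n=%2d  shifted=%8u  ideal=%10.1f\n",
		    n, decay_usage(usage, n),
		    (double)usage * pow(5.0 / 8.0, n));
	return (0);
}

In update_priority() above, one such pair of shifts is applied, indexed by the number of scheduler ticks elapsed since the thread's sched_stamp; do_priority_computation() then derives the scheduled priority as the base priority minus (sched_usage >> pri_shift), clamped between MINPRI_USER and MAXPRI_KERNEL.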