diff --git a/osfmk/kern/sched.h b/osfmk/kern/sched.h
index 347dc2f0fdca1f789337317580bdac1c54cd05b4..9532f4095c8e4d3725f400c110af4237251f04d8 100644
--- a/osfmk/kern/sched.h
+++ b/osfmk/kern/sched.h
@@ -1,14 +1,19 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -18,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -78,7 +83,6 @@
 #define MAXPRI         (NRQS-1)
 #define MINPRI         IDLEPRI                 /* lowest legal priority schedulable */
 #define        IDLEPRI         0                               /* idle thread priority */
-#define DEPRESSPRI     MINPRI                  /* depress priority */
 
 /*
  *     High-level priority assignments
 #define BASEPRI_FOREGROUND     (BASEPRI_DEFAULT + 16)                          /* 47 */
 #define BASEPRI_BACKGROUND     (BASEPRI_DEFAULT + 15)                          /* 46 */
 #define BASEPRI_DEFAULT                (MAXPRI_USER - (NRQS / 4))                      /* 31 */
+#define MAXPRI_THROTTLE                (MINPRI + 4)                                            /*  4 */
 #define MINPRI_USER                    MINPRI                                                          /*  0 */
 
+#ifdef CONFIG_EMBEDDED
+#define DEPRESSPRI     MAXPRI_THROTTLE
+#else
+#define DEPRESSPRI     MINPRI                  /* depress priority */
+#endif
+
+/* Type used for thread->sched_mode and saved_mode */
+typedef enum {
+       TH_MODE_NONE = 0,                                       /* unassigned, usually for saved_mode only */
+       TH_MODE_REALTIME,                                       /* time constraints supplied */
+       TH_MODE_FIXED,                                          /* use fixed priorities, no decay */
+       TH_MODE_TIMESHARE,                                      /* use timesharing algorithm */
+       TH_MODE_FAIRSHARE                                       /* use fair-share scheduling */         
+} sched_mode_t;
+
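[Annotation: the enum above replaces the old TH_MODE_* bit flags; the removed macros further down test "sched_mode & TH_MODE_TIMESHARE", whereas enum modes are compared with "==". A minimal sketch of the save/restore pattern the TH_MODE_NONE comment implies, assuming the thread fields named in the comment; the helpers are illustrative, not XNU API:]

/* Demote a thread to fair-share scheduling, preserving its original
 * mode in saved_mode so it can be restored later.  Hypothetical helper. */
static void
sched_mode_demote_sketch(thread_t thread)
{
	if (thread->sched_mode != TH_MODE_FAIRSHARE) {
		thread->saved_mode = thread->sched_mode;	/* remember real mode */
		thread->sched_mode = TH_MODE_FAIRSHARE;
	}
}

/* Undo the demotion; saved_mode returns to its unassigned state. */
static void
sched_mode_restore_sketch(thread_t thread)
{
	if (thread->saved_mode != TH_MODE_NONE) {
		thread->sched_mode = thread->saved_mode;
		thread->saved_mode = TH_MODE_NONE;	/* unassigned again */
	}
}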
 /*
  *     Macro to check for invalid priorities.
  */
 #define invalid_pri(pri) ((pri) < MINPRI || (pri) > MAXPRI)
 
+struct runq_stats {
+       uint64_t                                count_sum;
+       uint64_t                                last_change_timestamp;
+};
+
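[Annotation: a plausible reading of runq_stats, given the field names, is that count_sum accumulates queue depth multiplied by time, so the average run-queue length over an interval is the change in count_sum divided by the elapsed time. A sketch of an updater under that assumption, not necessarily the kernel's actual accounting:]

static void
runq_stats_update_sketch(struct runq_stats *stats, int count, uint64_t now)
{
	/* Accumulate (threads on queue) x (time spent at that depth). */
	stats->count_sum += (uint64_t)count * (now - stats->last_change_timestamp);
	stats->last_change_timestamp = now;
}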
+#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
+
 struct run_queue {
        int                                     highq;                          /* highest runnable queue */
        int                                     bitmap[NRQBM];          /* run queue bitmap array */
        int                                     count;                          /* # of threads total */
        int                                     urgency;                        /* level of preemption urgency */
        queue_head_t            queues[NRQS];           /* one for each priority */
+
+       struct runq_stats       runq_stats;
 };
 
-typedef struct run_queue       *run_queue_t;
-#define RUN_QUEUE_NULL         ((run_queue_t) 0)
+#endif /* defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY) */
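[Annotation: the bitmap array lets the scheduler locate highq without walking all NRQS queues. A hedged sketch of that lookup, assuming 32-bit bitmap words with bit N of word W standing for priority W*32+N; the real kernel uses its setbit/ffsbit primitives and may order bits differently:]

static int
run_queue_highest_sketch(const struct run_queue *rq)
{
	int word, bit;

	for (word = NRQBM - 1; word >= 0; word--) {
		if (rq->bitmap[word] == 0)
			continue;	/* no runnable thread in this band */
		for (bit = 31; bit >= 0; bit--) {
			if (rq->bitmap[word] & (1u << bit))
				return (word * 32 + bit);
		}
	}
	return IDLEPRI;		/* queue is empty */
}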
 
-#define first_timeslice(processor)             ((processor)->timeslice > 0)
+struct rt_queue {
+       int                                     count;                          /* # of threads total */
+       queue_head_t            queue;                          /* all runnable RT threads */
 
-#define        processor_timeslice_setup(processor, thread)                    \
-MACRO_BEGIN                                                                                                            \
-       (processor)->timeslice =                                                                        \
-               ((thread)->sched_mode & TH_MODE_TIMESHARE)?                             \
-                       (processor)->processor_set->timeshare_quanta: 1;        \
-MACRO_END
+       struct runq_stats       runq_stats;
+};
 
-#define thread_quantum_init(thread)                                                    \
-MACRO_BEGIN                                                                                                    \
-       (thread)->current_quantum =                                                     \
-               ((thread)->sched_mode & TH_MODE_REALTIME)?                      \
-                       (thread)->realtime.computation: std_quantum;    \
-MACRO_END
+#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_FIXEDPRIORITY)
+struct fairshare_queue {
+       int                                     count;                          /* # of threads total */
+       queue_head_t            queue;                          /* all runnable threads demoted to fairshare scheduling */
+       
+       struct runq_stats       runq_stats;
+};
+#endif
 
-/* Invoked at splsched by a thread on itself */
-#define csw_needed(thread, processor) (                                                                                \
-       ((thread)->state & TH_SUSP)                                                                             ||              \
-       (first_timeslice(processor)?                                                                                    \
-        ((processor)->runq.highq > (thread)->sched_pri                         ||                      \
-         (processor)->processor_set->runq.highq > (thread)->sched_pri) :               \
-        ((processor)->runq.highq >= (thread)->sched_pri                        ||                      \
-         (processor)->processor_set->runq.highq >= (thread)->sched_pri))       )
+#if defined(CONFIG_SCHED_GRRR_CORE)
 
 /*
- *     Scheduler routines.
+ * We map standard Mach priorities to an abstract scale that more properly
+ * indicates how we want processor time allocated under contention.
  */
+typedef uint8_t        grrr_proportional_priority_t;
+typedef uint8_t grrr_group_index_t;
+
+#define NUM_GRRR_PROPORTIONAL_PRIORITIES       256
+#define MAX_GRRR_PROPORTIONAL_PRIORITY ((grrr_proportional_priority_t)255)
+
+#if 0
+#define NUM_GRRR_GROUPS 8                                      /* log(256) */
+#endif
+
+#define NUM_GRRR_GROUPS 64                                     /* 256/4 */
+
+struct grrr_group {
+       queue_chain_t                   priority_order;                         /* next greatest weight group */
+       grrr_proportional_priority_t            minpriority;
+       grrr_group_index_t              index;
+
+       queue_head_t                    clients;
+       int                                             count;
+       uint32_t                                weight;
+#if 0
+       uint32_t                                deferred_removal_weight;
+#endif
+       uint32_t                                work;
+       thread_t                                current_client;
+};
 
-/* Remove thread from its run queue */
-extern run_queue_t     run_queue_remove(
-                                               thread_t        thread);
+struct grrr_run_queue {
+       int                                     count;
+       uint32_t                        last_rescale_tick;
+       struct grrr_group       groups[NUM_GRRR_GROUPS];
+       queue_head_t            sorted_group_list;
+       uint32_t                        weight;
+       grrr_group_t            current_group;
+       
+       struct runq_stats   runq_stats;
+};
+
+#endif /* defined(CONFIG_SCHED_GRRR_CORE) */
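[Annotation: the comment above describes mapping the 128 standard Mach priorities onto 256 proportional priorities divided into 64 groups of 4 apiece (the "256/4" note). A sketch consistent with those constants; the actual mapping curve lives in the GRRR implementation and may differ:]

static grrr_proportional_priority_t
grrr_priority_mapping_sketch(int mach_pri)	/* 0 .. MAXPRI */
{
	/* Spread 0..127 across the 256-entry proportional scale. */
	return (grrr_proportional_priority_t)(mach_pri * 2);
}

static grrr_group_index_t
grrr_group_mapping_sketch(grrr_proportional_priority_t gpri)
{
	/* 256 proportional priorities / 64 groups = 4 per group. */
	return (grrr_group_index_t)(gpri /
	    (NUM_GRRR_PROPORTIONAL_PRIORITIES / NUM_GRRR_GROUPS));
}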
+
+#define first_timeslice(processor)             ((processor)->timeslice > 0)
+
+extern struct rt_queue         rt_runq;
+
+/*
+ *     Scheduler routines.
+ */
 
 /* Handle quantum expiration for an executing thread */
 extern void            thread_quantum_expire(
                                        timer_call_param_t      processor,
                                        timer_call_param_t      thread);
 
-/* Called at splsched by a thread on itself */
-extern ast_t   csw_check(
-                                       thread_t                thread,
-                                       processor_t             processor);
+/* Context switch check for current processor */
+extern ast_t   csw_check(processor_t           processor);
 
+#if defined(CONFIG_SCHED_TRADITIONAL)
 extern uint32_t        std_quantum, min_std_quantum;
 extern uint32_t        std_quantum_us;
+#endif
+
+extern uint32_t thread_depress_time;
+extern uint32_t default_timeshare_computation;
+extern uint32_t default_timeshare_constraint;
 
 extern uint32_t        max_rt_quantum, min_rt_quantum;
 
 extern uint32_t        sched_cswtime;
 
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
 /*
  *     Age usage (1 << SCHED_TICK_SHIFT) times per second.
  */
@@ -229,6 +300,10 @@ extern uint32_t    sched_cswtime;
 extern unsigned                sched_tick;
 extern uint32_t                sched_tick_interval;
 
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+extern uint64_t                sched_one_second_interval;
+
 /* Periodic computation of various averages */
 extern void            compute_averages(void);
 
@@ -238,34 +313,64 @@ extern void               compute_averunnable(
 extern void            compute_stack_target(
                                        void                    *arg);
 
+extern void            compute_memory_pressure(
+                                       void                    *arg);
+
+extern void            compute_zone_gc_throttle(
+                                       void                    *arg);
+
+extern void            compute_pmap_gc_throttle(
+                                       void                    *arg);
+
 /*
  *     Conversion factor from usage
  *     to priority.
  */
+#if defined(CONFIG_SCHED_TRADITIONAL)
 extern uint32_t                sched_pri_shift;
-
-/*
- *     Scaling factor for usage
- *     based on load.
- */
+extern uint32_t                sched_fixed_shift;
 extern int8_t          sched_load_shifts[NRQS];
+#endif
 
 extern int32_t         sched_poll_yield_shift;
-extern uint32_t                sched_safe_duration;
+extern uint64_t                sched_safe_duration;
+
+extern uint32_t                sched_run_count, sched_share_count;
+extern uint32_t                sched_load_average, sched_mach_factor;
+
+extern uint32_t                avenrun[3], mach_factor[3];
 
 extern uint64_t                max_unsafe_computation;
 extern uint64_t                max_poll_computation;
 
-extern uint32_t                avenrun[3], mach_factor[3];
+#define sched_run_incr()                       \
+MACRO_BEGIN                                    \
+         hw_atomic_add(&sched_run_count, 1);   \
+MACRO_END
+
+#define sched_run_decr()                       \
+MACRO_BEGIN                                    \
+       hw_atomic_sub(&sched_run_count, 1);     \
+MACRO_END
+
+#define sched_share_incr()                     \
+MACRO_BEGIN                                                                                    \
+       (void)hw_atomic_add(&sched_share_count, 1);             \
+MACRO_END
+
+#define sched_share_decr()                     \
+MACRO_BEGIN                                                                                    \
+       (void)hw_atomic_sub(&sched_share_count, 1);             \
+MACRO_END
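[Annotation: an illustrative pairing of the counters above: a thread becoming runnable would bump the global run count, plus the timeshare count when its mode is TH_MODE_TIMESHARE, with the decrement macros called on block or termination. The helper name is hypothetical:]

static void
sched_counts_on_run_sketch(thread_t thread)
{
	sched_run_incr();
	if (thread->sched_mode == TH_MODE_TIMESHARE)
		sched_share_incr();
}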
 
 /*
  *     thread_timer_delta macro takes care of both thread timers.
  */
 #define thread_timer_delta(thread, delta)                                      \
 MACRO_BEGIN                                                                                                    \
-       (delta) = timer_delta(&(thread)->system_timer,                  \
+       (delta) = (typeof(delta))timer_delta(&(thread)->system_timer,                   \
                                                        &(thread)->system_timer_save);  \
-       (delta) += timer_delta(&(thread)->user_timer,                   \
+       (delta) += (typeof(delta))timer_delta(&(thread)->user_timer,                    \
                                                        &(thread)->user_timer_save);    \
 MACRO_END
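[Annotation: example use of thread_timer_delta, assuming a caller that wants the combined user+system CPU time accumulated since the thread's timers were last saved; the function is illustrative:]

static uint32_t
thread_cpu_delta_sketch(thread_t thread)
{
	uint32_t	delta;

	thread_timer_delta(thread, delta);	/* system delta, then += user delta */
	return delta;
}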