diff --git a/osfmk/kern/sched_prim.c b/osfmk/kern/sched_prim.c
index ff05df253..d7b959249 100644
--- a/osfmk/kern/sched_prim.c
+++ b/osfmk/kern/sched_prim.c
@@ -1,31 +1,29 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
  * limitations under the License. 
- * - * @APPLE_LICENSE_OSREFERENCE_HEADER_END@ + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ @@ -75,9 +73,12 @@ #include #include #include +#include #include #include +#include +#include #include #include @@ -102,10 +103,20 @@ #include #include +#include + #include -#ifdef __ppc__ -#include +#include + +struct rt_queue rt_runq; +#define RT_RUNQ ((processor_t)-1) +decl_simple_lock_data(static,rt_lock); + +#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) +static struct fairshare_queue fs_runq; +#define FS_RUNQ ((processor_t)-2) +decl_simple_lock_data(static,fs_lock); #endif #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */ @@ -120,41 +131,210 @@ int max_poll_quanta = MAX_POLL_QUANTA; #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */ int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT; -uint64_t max_unsafe_computation; -uint32_t sched_safe_duration; uint64_t max_poll_computation; +uint64_t max_unsafe_computation; +uint64_t sched_safe_duration; + +#if defined(CONFIG_SCHED_TRADITIONAL) + uint32_t std_quantum; uint32_t min_std_quantum; uint32_t std_quantum_us; +#endif /* CONFIG_SCHED_TRADITIONAL */ + +uint32_t thread_depress_time; +uint32_t default_timeshare_computation; +uint32_t default_timeshare_constraint; + uint32_t max_rt_quantum; uint32_t min_rt_quantum; uint32_t sched_cswtime; -static uint32_t delay_idle_limit, delay_idle_spin; -static processor_t delay_idle( - processor_t processor, - thread_t self); +#if defined(CONFIG_SCHED_TRADITIONAL) unsigned sched_tick; uint32_t sched_tick_interval; -uint32_t sched_pri_shift; +uint32_t sched_pri_shift = INT8_MAX; +uint32_t sched_fixed_shift; + +static boolean_t sched_traditional_use_pset_runqueue = FALSE; + +__attribute__((always_inline)) +static inline run_queue_t runq_for_processor(processor_t processor) +{ + if (sched_traditional_use_pset_runqueue) + return &processor->processor_set->pset_runq; + else + return &processor->runq; +} + +__attribute__((always_inline)) +static inline void runq_consider_incr_bound_count(processor_t processor, thread_t thread) +{ + if (thread->bound_processor == PROCESSOR_NULL) + return; + + assert(thread->bound_processor == processor); + + if (sched_traditional_use_pset_runqueue) + processor->processor_set->pset_runq_bound_count++; + + processor->runq_bound_count++; +} + +__attribute__((always_inline)) +static inline void runq_consider_decr_bound_count(processor_t processor, thread_t thread) +{ + if (thread->bound_processor == PROCESSOR_NULL) + return; + + assert(thread->bound_processor == processor); + + if (sched_traditional_use_pset_runqueue) + processor->processor_set->pset_runq_bound_count--; + + processor->runq_bound_count--; +} + +#endif /* CONFIG_SCHED_TRADITIONAL */ + +uint64_t sched_one_second_interval; + +uint32_t sched_run_count, sched_share_count; +uint32_t sched_load_average, sched_mach_factor; /* Forwards */ -void wait_queues_init(void); -static void load_shift_init(void); +#if defined(CONFIG_SCHED_TRADITIONAL) + +static void load_shift_init(void) __attribute__((section("__TEXT, initcode"))); +static void preempt_pri_init(void) __attribute__((section("__TEXT, initcode"))); + +#endif /* CONFIG_SCHED_TRADITIONAL */ + +static thread_t thread_select( + thread_t thread, + processor_t processor); + +#if CONFIG_SCHED_IDLE_IN_PLACE +static thread_t thread_select_idle( + thread_t thread, + processor_t processor); +#endif + +thread_t processor_idle( + thread_t thread, + processor_t processor); + +#if 
defined(CONFIG_SCHED_TRADITIONAL) + +static thread_t steal_thread( + processor_set_t pset); + +static thread_t steal_thread_disabled( + processor_set_t pset) __attribute__((unused)); + -static thread_t choose_thread( - processor_set_t pset, +static thread_t steal_processor_thread( processor_t processor); static void thread_update_scan(void); +static void processor_setrun( + processor_t processor, + thread_t thread, + integer_t options); + +static boolean_t +processor_enqueue( + processor_t processor, + thread_t thread, + integer_t options); + +static boolean_t +processor_queue_remove( + processor_t processor, + thread_t thread); + +static boolean_t processor_queue_empty(processor_t processor); + +static boolean_t priority_is_urgent(int priority); + +static ast_t processor_csw_check(processor_t processor); + +static boolean_t processor_queue_has_priority(processor_t processor, + int priority, + boolean_t gte); + +static boolean_t should_current_thread_rechoose_processor(processor_t processor); + +static int sched_traditional_processor_runq_count(processor_t processor); + +static boolean_t sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor); + +static uint64_t sched_traditional_processor_runq_stats_count_sum(processor_t processor); + +static uint64_t sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor); +#endif + + +#if defined(CONFIG_SCHED_TRADITIONAL) + +static void +sched_traditional_init(void); + +static void +sched_traditional_timebase_init(void); + +static void +sched_traditional_processor_init(processor_t processor); + +static void +sched_traditional_pset_init(processor_set_t pset); + +static void +sched_traditional_with_pset_runqueue_init(void); + +#endif + +static void +sched_realtime_init(void) __attribute__((section("__TEXT, initcode"))); + +static void +sched_realtime_timebase_init(void); + +#if defined(CONFIG_SCHED_TRADITIONAL) +static void +sched_traditional_tick_continue(void); + +static uint32_t +sched_traditional_initial_quantum_size(thread_t thread); + +static sched_mode_t +sched_traditional_initial_thread_sched_mode(task_t parent_task); + +static boolean_t +sched_traditional_supports_timeshare_mode(void); + +static thread_t +sched_traditional_choose_thread( + processor_t processor, + int priority); + +#endif + +#if DEBUG +extern int debug_task; +#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args) +#else +#define TLOG(a, fmt, args...) 
do {} while (0) +#endif + #if DEBUG static boolean_t thread_runnable( @@ -162,7 +342,6 @@ boolean_t thread_runnable( #endif /*DEBUG*/ - /* * State machine * @@ -189,43 +368,221 @@ boolean_t thread_runnable( * */ +#if defined(CONFIG_SCHED_TRADITIONAL) +int8_t sched_load_shifts[NRQS]; +int sched_preempt_pri[NRQBM]; +#endif + + +#if defined(CONFIG_SCHED_TRADITIONAL) + +const struct sched_dispatch_table sched_traditional_dispatch = { + sched_traditional_init, + sched_traditional_timebase_init, + sched_traditional_processor_init, + sched_traditional_pset_init, + sched_traditional_tick_continue, + sched_traditional_choose_thread, + steal_thread, + compute_priority, + choose_processor, + processor_enqueue, + processor_queue_shutdown, + processor_queue_remove, + processor_queue_empty, + priority_is_urgent, + processor_csw_check, + processor_queue_has_priority, + sched_traditional_initial_quantum_size, + sched_traditional_initial_thread_sched_mode, + sched_traditional_supports_timeshare_mode, + can_update_priority, + update_priority, + lightweight_update_priority, + sched_traditional_quantum_expire, + should_current_thread_rechoose_processor, + sched_traditional_processor_runq_count, + sched_traditional_processor_runq_stats_count_sum, + sched_traditional_fairshare_init, + sched_traditional_fairshare_runq_count, + sched_traditional_fairshare_runq_stats_count_sum, + sched_traditional_fairshare_enqueue, + sched_traditional_fairshare_dequeue, + sched_traditional_fairshare_queue_remove, + TRUE /* direct_dispatch_to_idle_processors */ +}; + +const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = { + sched_traditional_with_pset_runqueue_init, + sched_traditional_timebase_init, + sched_traditional_processor_init, + sched_traditional_pset_init, + sched_traditional_tick_continue, + sched_traditional_choose_thread, + steal_thread, + compute_priority, + choose_processor, + processor_enqueue, + processor_queue_shutdown, + processor_queue_remove, + sched_traditional_with_pset_runqueue_processor_queue_empty, + priority_is_urgent, + processor_csw_check, + processor_queue_has_priority, + sched_traditional_initial_quantum_size, + sched_traditional_initial_thread_sched_mode, + sched_traditional_supports_timeshare_mode, + can_update_priority, + update_priority, + lightweight_update_priority, + sched_traditional_quantum_expire, + should_current_thread_rechoose_processor, + sched_traditional_processor_runq_count, + sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum, + sched_traditional_fairshare_init, + sched_traditional_fairshare_runq_count, + sched_traditional_fairshare_runq_stats_count_sum, + sched_traditional_fairshare_enqueue, + sched_traditional_fairshare_dequeue, + sched_traditional_fairshare_queue_remove, + FALSE /* direct_dispatch_to_idle_processors */ +}; + +#endif + +const struct sched_dispatch_table *sched_current_dispatch = NULL; + /* - * Waiting protocols and implementation: + * Statically allocate a buffer to hold the longest possible + * scheduler description string, as currently implemented. + * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/ + * to export to userspace via sysctl(3). If either version + * changes, update the other. * - * Each thread may be waiting for exactly one event; this event - * is set using assert_wait(). That thread may be awakened either - * by performing a thread_wakeup_prim() on its event, - * or by directly waking that thread up with clear_wait(). - * - * The implementation of wait events uses a hash table. 
Each - * bucket is queue of threads having the same hash function - * value; the chain for the queue (linked list) is the run queue - * field. [It is not possible to be waiting and runnable at the - * same time.] - * - * Locks on both the thread and on the hash buckets govern the - * wait event field and the queue chain field. Because wakeup - * operations only have the event as an argument, the event hash - * bucket must be locked before any thread. - * - * Scheduling operations may also occur at interrupt level; therefore, - * interrupts below splsched() must be prevented when holding - * thread or hash bucket locks. - * - * The wait event hash table declarations are as follows: + * Note that in addition to being an upper bound on the strings + * in the kernel, it's also an exact parameter to PE_get_default(), + * which interrogates the device tree on some platforms. That + * API requires the caller know the exact size of the device tree + * property, so we need both a legacy size (32) and the current size + * (48) to deal with old and new device trees. The device tree property + * is similarly padded to a fixed size so that the same kernel image + * can run on multiple devices with different schedulers configured + * in the device tree. */ +#define SCHED_STRING_MAX_LENGTH (48) -#define NUMQUEUES 59 +char sched_string[SCHED_STRING_MAX_LENGTH]; +static enum sched_enum _sched_enum = sched_enum_unknown; -struct wait_queue wait_queues[NUMQUEUES]; - -#define wait_hash(event) \ - ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES) +void +sched_init(void) +{ + char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' }; + + /* Check for runtime selection of the scheduler algorithm */ + if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) { + /* If no boot-args override, look in device tree */ + if (!PE_get_default("kern.sched", sched_arg, + SCHED_STRING_MAX_LENGTH)) { + sched_arg[0] = '\0'; + } + } -int8_t sched_load_shifts[NRQS]; + if (strlen(sched_arg) > 0) { + if (0) { + /* Allow pattern below */ +#if defined(CONFIG_SCHED_TRADITIONAL) + } else if (0 == strcmp(sched_arg, kSchedTraditionalString)) { + sched_current_dispatch = &sched_traditional_dispatch; + _sched_enum = sched_enum_traditional; + strlcpy(sched_string, kSchedTraditionalString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedTraditionalString); + } else if (0 == strcmp(sched_arg, kSchedTraditionalWithPsetRunqueueString)) { + sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch; + _sched_enum = sched_enum_traditional_with_pset_runqueue; + strlcpy(sched_string, kSchedTraditionalWithPsetRunqueueString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedTraditionalWithPsetRunqueueString); +#endif +#if defined(CONFIG_SCHED_PROTO) + } else if (0 == strcmp(sched_arg, kSchedProtoString)) { + sched_current_dispatch = &sched_proto_dispatch; + _sched_enum = sched_enum_proto; + strlcpy(sched_string, kSchedProtoString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedProtoString); +#endif +#if defined(CONFIG_SCHED_GRRR) + } else if (0 == strcmp(sched_arg, kSchedGRRRString)) { + sched_current_dispatch = &sched_grrr_dispatch; + _sched_enum = sched_enum_grrr; + strlcpy(sched_string, kSchedGRRRString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedGRRRString); +#endif +#if defined(CONFIG_SCHED_FIXEDPRIORITY) + } else if (0 == strcmp(sched_arg, kSchedFixedPriorityString)) { + 
sched_current_dispatch = &sched_fixedpriority_dispatch; + _sched_enum = sched_enum_fixedpriority; + strlcpy(sched_string, kSchedFixedPriorityString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedFixedPriorityString); + } else if (0 == strcmp(sched_arg, kSchedFixedPriorityWithPsetRunqueueString)) { + sched_current_dispatch = &sched_fixedpriority_with_pset_runqueue_dispatch; + _sched_enum = sched_enum_fixedpriority_with_pset_runqueue; + strlcpy(sched_string, kSchedFixedPriorityWithPsetRunqueueString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedFixedPriorityWithPsetRunqueueString); +#endif + } else { + panic("Unrecognized scheduler algorithm: %s", sched_arg); + } + } else { +#if defined(CONFIG_SCHED_TRADITIONAL) + sched_current_dispatch = &sched_traditional_dispatch; + _sched_enum = sched_enum_traditional; + strlcpy(sched_string, kSchedTraditionalString, sizeof(sched_string)); + kprintf("Scheduler: Default of %s\n", kSchedTraditionalString); +#elif defined(CONFIG_SCHED_PROTO) + sched_current_dispatch = &sched_proto_dispatch; + _sched_enum = sched_enum_proto; + strlcpy(sched_string, kSchedProtoString, sizeof(sched_string)); + kprintf("Scheduler: Default of %s\n", kSchedProtoString); +#elif defined(CONFIG_SCHED_GRRR) + sched_current_dispatch = &sched_grrr_dispatch; + _sched_enum = sched_enum_grrr; + strlcpy(sched_string, kSchedGRRRString, sizeof(sched_string)); + kprintf("Scheduler: Default of %s\n", kSchedGRRRString); +#elif defined(CONFIG_SCHED_FIXEDPRIORITY) + sched_current_dispatch = &sched_fixedpriority_dispatch; + _sched_enum = sched_enum_fixedpriority; + strlcpy(sched_string, kSchedFixedPriorityString, sizeof(sched_string)); + kprintf("Scheduler: Default of %s\n", kSchedFixedPriorityString); +#else +#error No default scheduler implementation +#endif + } + + SCHED(init)(); + SCHED(fairshare_init)(); + sched_realtime_init(); + ast_init(); + + SCHED(pset_init)(&pset0); + SCHED(processor_init)(master_processor); +} void -sched_init(void) +sched_timebase_init(void) +{ + uint64_t abstime; + + clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime); + sched_one_second_interval = abstime; + + SCHED(timebase_init)(); + sched_realtime_timebase_init(); +} + +#if defined(CONFIG_SCHED_TRADITIONAL) + +static void +sched_traditional_init(void) { /* * Calculate the timeslicing quantum @@ -237,18 +594,13 @@ sched_init(void) printf("standard timeslicing quantum is %d us\n", std_quantum_us); - sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) * - (1 << SCHED_TICK_SHIFT); - - wait_queues_init(); load_shift_init(); - pset_init(&default_pset); + preempt_pri_init(); sched_tick = 0; - ast_init(); } -void -sched_timebase_init(void) +static void +sched_traditional_timebase_init(void) { uint64_t abstime; uint32_t shift; @@ -257,29 +609,18 @@ sched_timebase_init(void) clock_interval_to_absolutetime_interval( std_quantum_us, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - std_quantum = abstime; + std_quantum = (uint32_t)abstime; /* smallest remaining quantum (250 us) */ clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - min_std_quantum = abstime; - - /* smallest rt computaton (50 us) */ - clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - min_rt_quantum = abstime; - - /* maximum rt computation (50 ms) */ - 
clock_interval_to_absolutetime_interval( - 50, 1000*NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - max_rt_quantum = abstime; + min_std_quantum = (uint32_t)abstime; /* scheduler tick interval */ clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - sched_tick_interval = abstime; + sched_tick_interval = (uint32_t)abstime; /* * Compute conversion factor from usage to @@ -288,31 +629,85 @@ sched_timebase_init(void) abstime = (abstime * 5) / 3; for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) abstime >>= 1; - sched_pri_shift = shift; + sched_fixed_shift = shift; max_unsafe_computation = max_unsafe_quanta * std_quantum; + sched_safe_duration = 2 * max_unsafe_quanta * std_quantum; + max_poll_computation = max_poll_quanta * std_quantum; + thread_depress_time = 1 * std_quantum; + default_timeshare_computation = std_quantum / 2; + default_timeshare_constraint = std_quantum; - /* delay idle constant(s) (60, 1 us) */ - clock_interval_to_absolutetime_interval(60, NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - delay_idle_limit = abstime; +} - clock_interval_to_absolutetime_interval(1, NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - delay_idle_spin = abstime; +static void +sched_traditional_processor_init(processor_t processor) +{ + if (!sched_traditional_use_pset_runqueue) { + run_queue_init(&processor->runq); + } + processor->runq_bound_count = 0; +} + +static void +sched_traditional_pset_init(processor_set_t pset) +{ + if (sched_traditional_use_pset_runqueue) { + run_queue_init(&pset->pset_runq); + } + pset->pset_runq_bound_count = 0; +} + +static void +sched_traditional_with_pset_runqueue_init(void) +{ + sched_traditional_init(); + sched_traditional_use_pset_runqueue = TRUE; } +#endif /* CONFIG_SCHED_TRADITIONAL */ + +#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) void -wait_queues_init(void) +sched_traditional_fairshare_init(void) +{ + simple_lock_init(&fs_lock, 0); + + fs_runq.count = 0; + queue_init(&fs_runq.queue); +} +#endif + +static void +sched_realtime_init(void) { - register int i; + simple_lock_init(&rt_lock, 0); + + rt_runq.count = 0; + queue_init(&rt_runq.queue); +} + +static void +sched_realtime_timebase_init(void) +{ + uint64_t abstime; + + /* smallest rt computaton (50 us) */ + clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime); + assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); + min_rt_quantum = (uint32_t)abstime; + + /* maximum rt computation (50 ms) */ + clock_interval_to_absolutetime_interval( + 50, 1000*NSEC_PER_USEC, &abstime); + assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); + max_rt_quantum = (uint32_t)abstime; - for (i = 0; i < NUMQUEUES; i++) { - wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO); - } } +#if defined(CONFIG_SCHED_TRADITIONAL) + /* * Set up values for timeshare * loading factors. @@ -331,6 +726,20 @@ load_shift_init(void) } } +static void +preempt_pri_init(void) +{ + int i, *p = sched_preempt_pri; + + for (i = BASEPRI_FOREGROUND + 1; i < MINPRI_KERNEL; ++i) + setbit(i, p); + + for (i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) + setbit(i, p); +} + +#endif /* CONFIG_SCHED_TRADITIONAL */ + /* * Thread wait timer expiration. 
*/ @@ -354,6 +763,8 @@ thread_timer_expire( splx(s); } +#ifndef __LP64__ + /* * thread_set_timer: * @@ -374,7 +785,7 @@ thread_set_timer( thread_lock(thread); if ((thread->state & TH_WAIT) != 0) { clock_interval_to_deadline(interval, scale_factor, &deadline); - if (!timer_call_enter(&thread->wait_timer, deadline)) + if (!timer_call_enter(&thread->wait_timer, deadline, thread->sched_pri >= BASEPRI_RTQUEUES ? TIMER_CALL_CRITICAL : 0)) thread->wait_timer_active++; thread->wait_timer_is_set = TRUE; } @@ -392,7 +803,7 @@ thread_set_timer_deadline( s = splsched(); thread_lock(thread); if ((thread->state & TH_WAIT) != 0) { - if (!timer_call_enter(&thread->wait_timer, deadline)) + if (!timer_call_enter(&thread->wait_timer, deadline, thread->sched_pri >= BASEPRI_RTQUEUES ? TIMER_CALL_CRITICAL : 0)) thread->wait_timer_active++; thread->wait_timer_is_set = TRUE; } @@ -417,6 +828,8 @@ thread_cancel_timer(void) splx(s); } +#endif /* __LP64__ */ + /* * thread_unblock: * @@ -434,12 +847,12 @@ thread_unblock( boolean_t result = FALSE; /* - * Set wait_result. + * Set wait_result. */ thread->wait_result = wresult; /* - * Cancel pending wait timer. + * Cancel pending wait timer. */ if (thread->wait_timer_is_set) { if (timer_call_cancel(&thread->wait_timer)) @@ -448,33 +861,45 @@ thread_unblock( } /* - * Update scheduling state. + * Update scheduling state: not waiting, + * set running. */ thread->state &= ~(TH_WAIT|TH_UNINT); if (!(thread->state & TH_RUN)) { thread->state |= TH_RUN; - /* - * Mark unblocked if call out. - */ - if (thread->options & TH_OPT_CALLOUT) - call_thread_unblock(); + (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread); /* - * Update pset run counts. + * Update run counts. */ - pset_run_incr(thread->processor_set); - if (thread->sched_mode & TH_MODE_TIMESHARE) - pset_share_incr(thread->processor_set); + sched_run_incr(); + if (thread->sched_mode == TH_MODE_TIMESHARE) + sched_share_incr(); } - else + else { + /* + * Signal if idling on another processor. + */ +#if CONFIG_SCHED_IDLE_IN_PLACE + if (thread->state & TH_IDLE) { + processor_t processor = thread->last_processor; + + if (processor != current_processor()) + machine_signal_idle(processor); + } +#else + assert((thread->state & TH_IDLE) == 0); +#endif + result = TRUE; + } /* * Calculate deadline for real-time threads. */ - if (thread->sched_mode & TH_MODE_REALTIME) { + if (thread->sched_mode == TH_MODE_REALTIME) { thread->realtime.deadline = mach_absolute_time(); thread->realtime.deadline += thread->realtime.constraint; } @@ -488,7 +913,9 @@ thread_unblock( KERNEL_DEBUG_CONSTANT( MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE, - (int)thread, (int)thread->sched_pri, 0, 0, 0); + (uintptr_t)thread_tid(thread), thread->sched_pri, 0, 0, 0); + + DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info); return (result); } @@ -540,6 +967,8 @@ thread_mark_wait_locked( { boolean_t at_safe_point; + assert(thread == current_thread()); + /* * The thread may have certain types of interrupts/aborts masked * off. Even if the wait location says these types of interrupts @@ -552,16 +981,19 @@ thread_mark_wait_locked( at_safe_point = (interruptible == THREAD_ABORTSAFE); if ( interruptible == THREAD_UNINT || - !(thread->state & TH_ABORT) || + !(thread->sched_flags & TH_SFLAG_ABORT) || (!at_safe_point && - (thread->state & TH_ABORT_SAFELY)) ) { + (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) { + + DTRACE_SCHED(sleep); + thread->state |= (interruptible) ? 
TH_WAIT : (TH_WAIT | TH_UNINT); thread->at_safe_point = at_safe_point; return (thread->wait_result = THREAD_WAITING); } else - if (thread->state & TH_ABORT_SAFELY) - thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY); + if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) + thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK; return (thread->wait_result = THREAD_INTERRUPTED); } @@ -656,7 +1088,7 @@ assert_wait_timeout( thread_lock(thread); clock_interval_to_deadline(interval, scale_factor, &deadline); - wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event, + wresult = wait_queue_assert_wait64_locked(wqueue, CAST_DOWN(event64_t, event), interruptible, deadline, thread); thread_unlock(thread); @@ -684,7 +1116,7 @@ assert_wait_deadline( wait_queue_lock(wqueue); thread_lock(thread); - wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event, + wresult = wait_queue_assert_wait64_locked(wqueue, CAST_DOWN(event64_t,event), interruptible, deadline, thread); thread_unlock(thread); @@ -749,57 +1181,6 @@ thread_sleep_usimple_lock( return res; } -/* - * thread_sleep_mutex: - * - * Cause the current thread to wait until the specified event - * occurs. The specified mutex is unlocked before releasing - * the cpu. The mutex will be re-acquired before returning. - * - * JMM - Add hint to make sure mutex is available before rousting - */ -wait_result_t -thread_sleep_mutex( - event_t event, - mutex_t *mutex, - wait_interrupt_t interruptible) -{ - wait_result_t res; - - res = assert_wait(event, interruptible); - if (res == THREAD_WAITING) { - mutex_unlock(mutex); - res = thread_block(THREAD_CONTINUE_NULL); - mutex_lock(mutex); - } - return res; -} - -/* - * thread_sleep_mutex_deadline: - * - * Cause the current thread to wait until the specified event - * (or deadline) occurs. The specified mutex is unlocked before - * releasing the cpu. The mutex will be re-acquired before returning. 
- */ -wait_result_t -thread_sleep_mutex_deadline( - event_t event, - mutex_t *mutex, - uint64_t deadline, - wait_interrupt_t interruptible) -{ - wait_result_t res; - - res = assert_wait_deadline(event, interruptible, deadline); - if (res == THREAD_WAITING) { - mutex_unlock(mutex); - res = thread_block(THREAD_CONTINUE_NULL); - mutex_lock(mutex); - } - return res; -} - /* * thread_sleep_lock_write: * @@ -841,13 +1222,15 @@ thread_stop( thread_t thread) { wait_result_t wresult; - spl_t s; + spl_t s = splsched(); - s = splsched(); wake_lock(thread); + thread_lock(thread); while (thread->state & TH_SUSP) { thread->wake_active = TRUE; + thread_unlock(thread); + wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE); wake_unlock(thread); splx(s); @@ -860,21 +1243,20 @@ thread_stop( s = splsched(); wake_lock(thread); + thread_lock(thread); } - thread_lock(thread); thread->state |= TH_SUSP; while (thread->state & TH_RUN) { processor_t processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->active_thread == thread ) + if (processor != PROCESSOR_NULL && processor->active_thread == thread) cause_ast_check(processor); - thread_unlock(thread); thread->wake_active = TRUE; + thread_unlock(thread); + wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE); wake_unlock(thread); splx(s); @@ -929,10 +1311,11 @@ thread_unstop( if (thread->wake_active) { thread->wake_active = FALSE; thread_unlock(thread); + + thread_wakeup(&thread->wake_active); wake_unlock(thread); splx(s); - thread_wakeup(&thread->wake_active); return; } } @@ -961,13 +1344,12 @@ thread_wait( while (thread->state & TH_RUN) { processor_t processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->active_thread == thread ) + if (processor != PROCESSOR_NULL && processor->active_thread == thread) cause_ast_check(processor); - thread_unlock(thread); thread->wake_active = TRUE; + thread_unlock(thread); + wresult = assert_wait(&thread->wake_active, THREAD_UNINT); wake_unlock(thread); splx(s); @@ -1007,7 +1389,7 @@ clear_wait_internal( wait_result_t wresult) { wait_queue_t wq = thread->wait_queue; - int i = LockTimeOut; + uint32_t i = LockTimeOut; do { if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) @@ -1031,9 +1413,9 @@ clear_wait_internal( } return (thread_go(thread, wresult)); - } while (--i > 0); + } while ((--i > 0) || machine_timeout_suspended()); - panic("clear_wait_internal: deadlock: thread=0x%x, wq=0x%x, cpu=%d\n", + panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n", thread, wq, cpu_number()); return (KERN_FAILURE); @@ -1078,23 +1460,34 @@ kern_return_t thread_wakeup_prim( event_t event, boolean_t one_thread, - wait_result_t result) + wait_result_t result) { - register wait_queue_t wq; + return (thread_wakeup_prim_internal(event, one_thread, result, -1)); +} + + +kern_return_t +thread_wakeup_prim_internal( + event_t event, + boolean_t one_thread, + wait_result_t result, + int priority) +{ + register wait_queue_t wq; register int index; index = wait_hash(event); wq = &wait_queues[index]; if (one_thread) - return (wait_queue_wakeup_one(wq, event, result)); + return (wait_queue_wakeup_one(wq, event, result, priority)); else - return (wait_queue_wakeup_all(wq, event, result)); + return (wait_queue_wakeup_all(wq, event, result)); } /* * thread_bind: * - * Force a thread to execute on the specified processor. 
+ * Force the current thread to execute on the specified processor. * * Returns the previous binding. PROCESSOR_NULL means * not bound. @@ -1103,150 +1496,429 @@ thread_wakeup_prim( */ processor_t thread_bind( - register thread_t thread, - processor_t processor) + processor_t processor) { + thread_t self = current_thread(); processor_t prev; - run_queue_t runq = RUN_QUEUE_NULL; spl_t s; s = splsched(); - thread_lock(thread); - prev = thread->bound_processor; - if (prev != PROCESSOR_NULL) - runq = run_queue_remove(thread); + thread_lock(self); - thread->bound_processor = processor; + prev = self->bound_processor; + self->bound_processor = processor; - if (runq != RUN_QUEUE_NULL) - thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); - thread_unlock(thread); + thread_unlock(self); splx(s); return (prev); } -struct { - uint32_t idle_pset_last, - idle_pset_any, - idle_bound; - - uint32_t pset_self, - pset_last, - pset_other, - bound_self, - bound_other; - - uint32_t realtime_self, - realtime_last, - realtime_other; - - uint32_t missed_realtime, - missed_other; -} dispatch_counts; - /* - * Select a thread for the current processor to run. + * thread_select: + * + * Select a new thread for the current processor to execute. * * May select the current thread, which must be locked. */ -thread_t +static thread_t thread_select( - register processor_t processor) + thread_t thread, + processor_t processor) { - register thread_t thread; - processor_set_t pset; - boolean_t other_runnable; + processor_set_t pset = processor->processor_set; + thread_t new_thread = THREAD_NULL; + boolean_t inactive_state; - /* - * Check for other non-idle runnable threads. - */ - pset = processor->processor_set; - thread = processor->active_thread; + assert(processor == current_processor()); + + do { + /* + * Update the priority. + */ + if (SCHED(can_update_priority)(thread)) + SCHED(update_priority)(thread); + + processor->current_pri = thread->sched_pri; + processor->current_thmode = thread->sched_mode; - /* Update the thread's priority */ - if (thread->sched_stamp != sched_tick) - update_priority(thread); + pset_lock(pset); - processor->current_pri = thread->sched_pri; + assert(pset->low_count); + assert(pset->low_pri); + + inactive_state = processor->state != PROCESSOR_SHUTDOWN && machine_processor_is_inactive(processor); - simple_lock(&pset->sched_lock); - - other_runnable = processor->runq.count > 0 || pset->runq.count > 0; - - if ( thread->state == TH_RUN && - thread->processor_set == pset && - (thread->bound_processor == PROCESSOR_NULL || - thread->bound_processor == processor) ) { - if ( thread->sched_pri >= BASEPRI_RTQUEUES && - first_timeslice(processor) ) { - if (pset->runq.highq >= BASEPRI_RTQUEUES) { - register run_queue_t runq = &pset->runq; - register queue_t q; - - q = runq->queues + runq->highq; - if (((thread_t)q->next)->realtime.deadline < - processor->deadline) { - thread = (thread_t)q->next; - ((queue_entry_t)thread)->next->prev = q; - q->next = ((queue_entry_t)thread)->next; - thread->runq = RUN_QUEUE_NULL; - assert(thread->sched_mode & TH_MODE_PREEMPT); - runq->count--; runq->urgency--; - if (queue_empty(q)) { - if (runq->highq != IDLEPRI) - clrbit(MAXPRI - runq->highq, runq->bitmap); - runq->highq = MAXPRI - ffsbit(runq->bitmap); + simple_lock(&rt_lock); + + /* + * Test to see if the current thread should continue + * to run on this processor. Must be runnable, and not + * bound to a different processor, nor be in the wrong + * processor set. 
+ */ + if ( ((thread->state & ~TH_SUSP) == TH_RUN) && + (thread->sched_pri >= BASEPRI_RTQUEUES || + processor->processor_meta == PROCESSOR_META_NULL || + processor->processor_meta->primary == processor) && + (thread->bound_processor == PROCESSOR_NULL || + thread->bound_processor == processor) && + (thread->affinity_set == AFFINITY_SET_NULL || + thread->affinity_set->aset_pset == pset) ) { + if ( thread->sched_pri >= BASEPRI_RTQUEUES && + first_timeslice(processor) ) { + if (rt_runq.count > 0) { + register queue_t q; + + q = &rt_runq.queue; + if (((thread_t)q->next)->realtime.deadline < + processor->deadline) { + thread = (thread_t)dequeue_head(q); + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count); + rt_runq.count--; } } + + simple_unlock(&rt_lock); + + processor->deadline = thread->realtime.deadline; + + pset_unlock(pset); + + return (thread); } - processor->deadline = thread->realtime.deadline; + if (!inactive_state && (thread->sched_mode != TH_MODE_FAIRSHARE || SCHED(fairshare_runq_count)() == 0) && (rt_runq.count == 0 || BASEPRI_RTQUEUES < thread->sched_pri) && + (new_thread = SCHED(choose_thread)(processor, thread->sched_mode == TH_MODE_FAIRSHARE ? MINPRI : thread->sched_pri)) == THREAD_NULL) { + + simple_unlock(&rt_lock); + + /* I am the highest priority runnable (non-idle) thread */ + + pset_pri_hint(pset, processor, processor->current_pri); + + pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor)); + + processor->deadline = UINT64_MAX; + + pset_unlock(pset); + + return (thread); + } + } + + if (new_thread != THREAD_NULL || + (SCHED(processor_queue_has_priority)(processor, rt_runq.count == 0 ? IDLEPRI : BASEPRI_RTQUEUES, TRUE) && + (new_thread = SCHED(choose_thread)(processor, MINPRI)) != THREAD_NULL)) { + simple_unlock(&rt_lock); + + if (!inactive_state) { + pset_pri_hint(pset, processor, new_thread->sched_pri); + + pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor)); + } + + processor->deadline = UINT64_MAX; + pset_unlock(pset); + + return (new_thread); + } + + if (rt_runq.count > 0) { + thread = (thread_t)dequeue_head(&rt_runq.queue); + + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count); + rt_runq.count--; - simple_unlock(&pset->sched_lock); + simple_unlock(&rt_lock); + + processor->deadline = thread->realtime.deadline; + pset_unlock(pset); return (thread); } - if ( (!other_runnable || - (processor->runq.highq < thread->sched_pri && - pset->runq.highq < thread->sched_pri)) ) { + simple_unlock(&rt_lock); - /* I am the highest priority runnable (non-idle) thread */ + /* No realtime threads and no normal threads on the per-processor + * runqueue. Finally check for global fairshare threads. + */ + if ((new_thread = SCHED(fairshare_dequeue)()) != THREAD_NULL) { processor->deadline = UINT64_MAX; + pset_unlock(pset); + + return (new_thread); + } + + processor->deadline = UINT64_MAX; + + /* + * Set processor inactive based on + * indication from the platform code. + */ + if (inactive_state) { + if (processor->state == PROCESSOR_RUNNING) + remqueue((queue_entry_t)processor); + else + if (processor->state == PROCESSOR_IDLE) + remqueue((queue_entry_t)processor); - simple_unlock(&pset->sched_lock); + processor->state = PROCESSOR_INACTIVE; - return (thread); + pset_unlock(pset); + + return (processor->idle_thread); } - } - if (other_runnable) - thread = choose_thread(pset, processor); - else { + /* + * No runnable threads, attempt to steal + * from other processors. 
+ */ + new_thread = SCHED(steal_thread)(pset); + if (new_thread != THREAD_NULL) { + return (new_thread); + } + + /* + * If other threads have appeared, shortcut + * around again. + */ + if (!SCHED(processor_queue_empty)(processor) || rt_runq.count > 0 || SCHED(fairshare_runq_count)() > 0) + continue; + + pset_lock(pset); + /* * Nothing is runnable, so set this processor idle if it - * was running. Return its idle thread. + * was running. */ if (processor->state == PROCESSOR_RUNNING) { - remqueue(&pset->active_queue, (queue_entry_t)processor); + remqueue((queue_entry_t)processor); processor->state = PROCESSOR_IDLE; - enqueue_tail(&pset->idle_queue, (queue_entry_t)processor); - pset->idle_count++; + if (processor->processor_meta == PROCESSOR_META_NULL || processor->processor_meta->primary == processor) { + enqueue_head(&pset->idle_queue, (queue_entry_t)processor); + pset_pri_init_hint(pset, processor); + pset_count_init_hint(pset, processor); + } + else { + enqueue_head(&processor->processor_meta->idle_queue, (queue_entry_t)processor); + pset_unlock(pset); + return (processor->idle_thread); + } } - processor->deadline = UINT64_MAX; + pset_unlock(pset); + +#if CONFIG_SCHED_IDLE_IN_PLACE + /* + * Choose idle thread if fast idle is not possible. + */ + if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES) + return (processor->idle_thread); + + /* + * Perform idling activities directly without a + * context switch. Return dispatched thread, + * else check again for a runnable thread. + */ + new_thread = thread_select_idle(thread, processor); + +#else /* !CONFIG_SCHED_IDLE_IN_PLACE */ + + /* + * Do a full context switch to idle so that the current + * thread can start running on another processor without + * waiting for the fast-idled processor to wake up. + */ + return (processor->idle_thread); + +#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */ + + } while (new_thread == THREAD_NULL); + + return (new_thread); +} + +#if CONFIG_SCHED_IDLE_IN_PLACE +/* + * thread_select_idle: + * + * Idle the processor using the current thread context. + * + * Called with thread locked, then dropped and relocked. + */ +static thread_t +thread_select_idle( + thread_t thread, + processor_t processor) +{ + thread_t new_thread; + + if (thread->sched_mode == TH_MODE_TIMESHARE) + sched_share_decr(); + sched_run_decr(); + + thread->state |= TH_IDLE; + processor->current_pri = IDLEPRI; + processor->current_thmode = TH_MODE_NONE; + + thread_unlock(thread); + + /* + * Switch execution timing to processor idle thread. + */ + processor->last_dispatch = mach_absolute_time(); + thread->last_run_time = processor->last_dispatch; + thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer); + PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer; + + /* + * Cancel the quantum timer while idling. + */ + timer_call_cancel(&processor->quantum_timer); + processor->timeslice = 0; + + (*thread->sched_call)(SCHED_CALL_BLOCK, thread); + + thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0); + + /* + * Enable interrupts and perform idling activities. No + * preemption due to TH_IDLE being set. + */ + spllo(); new_thread = processor_idle(thread, processor); - thread = processor->idle_thread; + /* + * Return at splsched. 
+ */ + (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread); + + thread_lock(thread); + + /* + * If we idled in place, simulate a context switch back + * to the original priority of the thread so that the + * platform layer cannot distinguish this from a true + * switch to the idle thread. + */ + if (thread->sched_mode == TH_MODE_REALTIME) + thread_tell_urgency(THREAD_URGENCY_REAL_TIME, thread->realtime.period, thread->realtime.deadline); + /* Identify non-promoted threads which have requested a + * "background" priority. + */ + else if ((thread->sched_pri <= MAXPRI_THROTTLE) && + (thread->priority <= MAXPRI_THROTTLE)) + thread_tell_urgency(THREAD_URGENCY_BACKGROUND, thread->sched_pri, thread->priority); + else + thread_tell_urgency(THREAD_URGENCY_NORMAL, thread->sched_pri, thread->priority); + + /* + * If awakened, switch to thread timer and start a new quantum. + * Otherwise skip; we will context switch to another thread or return here. + */ + if (!(thread->state & TH_WAIT)) { + processor->last_dispatch = mach_absolute_time(); + thread_timer_event(processor->last_dispatch, &thread->system_timer); + PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; + + thread_quantum_init(thread); + thread->last_quantum_refill_time = processor->last_dispatch; + + processor->quantum_end = processor->last_dispatch + thread->current_quantum; + timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end, TIMER_CALL_CRITICAL); + processor->timeslice = 1; + + thread->computation_epoch = processor->last_dispatch; } - simple_unlock(&pset->sched_lock); + thread->state &= ~TH_IDLE; - return (thread); + sched_run_incr(); + if (thread->sched_mode == TH_MODE_TIMESHARE) + sched_share_incr(); + + return (new_thread); +} +#endif /* CONFIG_SCHED_IDLE_IN_PLACE */ + +#if defined(CONFIG_SCHED_TRADITIONAL) +static thread_t +sched_traditional_choose_thread( + processor_t processor, + int priority) +{ + thread_t thread; + + thread = choose_thread(processor, runq_for_processor(processor), priority); + if (thread != THREAD_NULL) { + runq_consider_decr_bound_count(processor, thread); + } + + return thread; +} + +#endif /* defined(CONFIG_SCHED_TRADITIONAL) */ + +#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_FIXEDPRIORITY) + +/* + * choose_thread: + * + * Locate a thread to execute from the processor run queue + * and return it. Only choose a thread with greater or equal + * priority. + * + * Associated pset must be locked. Returns THREAD_NULL + * on failure. 
+ */ +thread_t +choose_thread( + processor_t processor, + run_queue_t rq, + int priority) +{ + queue_t queue = rq->queues + rq->highq; + int pri = rq->highq, count = rq->count; + thread_t thread; + + while (count > 0 && pri >= priority) { + thread = (thread_t)queue_first(queue); + while (!queue_end(queue, (queue_entry_t)thread)) { + if (thread->bound_processor == PROCESSOR_NULL || + thread->bound_processor == processor) { + remqueue((queue_entry_t)thread); + + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); + rq->count--; + if (SCHED(priority_is_urgent)(pri)) { + rq->urgency--; assert(rq->urgency >= 0); + } + if (queue_empty(queue)) { + if (pri != IDLEPRI) + clrbit(MAXPRI - pri, rq->bitmap); + rq->highq = MAXPRI - ffsbit(rq->bitmap); + } + + return (thread); + } + count--; + + thread = (thread_t)queue_next((queue_entry_t)thread); + } + + queue--; pri--; + } + + return (THREAD_NULL); } +#endif /* defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_FIXEDPRIORITY) */ + /* * Perform a context switch and start executing the new thread. * @@ -1281,124 +1953,148 @@ MACRO_BEGIN \ } \ MACRO_END -boolean_t +static boolean_t thread_invoke( - register thread_t old_thread, - register thread_t new_thread, + register thread_t self, + register thread_t thread, ast_t reason) { - thread_continue_t new_cont, continuation = old_thread->continuation; - void *new_param, *parameter = old_thread->parameter; + thread_continue_t continuation = self->continuation; + void *parameter = self->parameter; processor_t processor; - thread_t prev_thread; - if (get_preemption_level() != 0) - panic("thread_invoke: preemption_level %d\n", - get_preemption_level()); + if (get_preemption_level() != 0) { + int pl = get_preemption_level(); + panic("thread_invoke: preemption_level %d, possible cause: %s", + pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" : + "blocking while holding a spinlock, or within interrupt context")); + } - assert(old_thread == current_thread()); + assert(self == current_thread()); /* * Mark thread interruptible. */ - thread_lock(new_thread); - new_thread->state &= ~TH_UNINT; + thread_lock(thread); + thread->state &= ~TH_UNINT; - assert(thread_runnable(new_thread)); +#if DEBUG + assert(thread_runnable(thread)); +#endif /* * Allow time constraint threads to hang onto * a stack. */ - if ( (old_thread->sched_mode & TH_MODE_REALTIME) && - !old_thread->reserved_stack ) { - old_thread->reserved_stack = old_thread->kernel_stack; - } + if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) + self->reserved_stack = self->kernel_stack; if (continuation != NULL) { - if (!new_thread->kernel_stack) { + if (!thread->kernel_stack) { /* - * If the old thread is using a privileged stack, + * If we are using a privileged stack, * check to see whether we can exchange it with - * that of the new thread. + * that of the other thread. */ - if ( old_thread->kernel_stack == old_thread->reserved_stack && - !new_thread->reserved_stack) + if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) goto need_stack; /* * Context switch by performing a stack handoff. 
*/ - new_cont = new_thread->continuation; - new_thread->continuation = NULL; - new_param = new_thread->parameter; - new_thread->parameter = NULL; + continuation = thread->continuation; + parameter = thread->parameter; processor = current_processor(); - processor->active_thread = new_thread; - processor->current_pri = new_thread->sched_pri; - new_thread->last_processor = processor; - ast_context(new_thread); - thread_unlock(new_thread); - - current_task()->csw++; + processor->active_thread = thread; + processor->current_pri = thread->sched_pri; + processor->current_thmode = thread->sched_mode; + if (thread->last_processor != processor && thread->last_processor != NULL) { + if (thread->last_processor->processor_set != processor->processor_set) + thread->ps_switch++; + thread->p_switch++; + } + thread->last_processor = processor; + thread->c_switch++; + ast_context(thread); + thread_unlock(thread); - old_thread->reason = reason; + self->reason = reason; processor->last_dispatch = mach_absolute_time(); - timer_event((uint32_t)processor->last_dispatch, - &new_thread->system_timer); - - thread_done(old_thread, new_thread, processor); + self->last_run_time = processor->last_dispatch; + thread_timer_event(processor->last_dispatch, &thread->system_timer); + PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); - machine_stack_handoff(old_thread, new_thread); + if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0); + } - thread_begin(new_thread, processor); + DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info); - /* - * Now dispatch the old thread. 
- */ - thread_dispatch(old_thread); + SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri); + + TLOG(1, "thread_invoke: calling stack_handoff\n"); + stack_handoff(self, thread); - counter_always(c_thread_invoke_hits++); + DTRACE_SCHED(on__cpu); - funnel_refunnel_check(new_thread, 2); + thread_dispatch(self, thread); + + thread->continuation = thread->parameter = NULL; + + counter(c_thread_invoke_hits++); + + funnel_refunnel_check(thread, 2); (void) spllo(); - assert(new_cont); - call_continuation(new_cont, new_param, new_thread->wait_result); + assert(continuation); + call_continuation(continuation, parameter, thread->wait_result); /*NOTREACHED*/ } - else - if (new_thread == old_thread) { + else if (thread == self) { /* same thread but with continuation */ + ast_context(self); counter(++c_thread_invoke_same); - thread_unlock(new_thread); + thread_unlock(self); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); - funnel_refunnel_check(new_thread, 3); + self->continuation = self->parameter = NULL; + + funnel_refunnel_check(self, 3); (void) spllo(); - call_continuation(continuation, parameter, new_thread->wait_result); + call_continuation(continuation, parameter, self->wait_result); /*NOTREACHED*/ } } else { /* - * Check that the new thread has a stack + * Check that the other thread has a stack */ - if (!new_thread->kernel_stack) { + if (!thread->kernel_stack) { need_stack: - if (!stack_alloc_try(new_thread)) { - counter_always(c_thread_invoke_misses++); - thread_unlock(new_thread); - thread_stack_enqueue(new_thread); + if (!stack_alloc_try(thread)) { + counter(c_thread_invoke_misses++); + thread_unlock(thread); + thread_stack_enqueue(thread); return (FALSE); } } - else - if (new_thread == old_thread) { + else if (thread == self) { + ast_context(self); counter(++c_thread_invoke_same); - thread_unlock(new_thread); + thread_unlock(self); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + return (TRUE); } } @@ -1407,47 +2103,63 @@ need_stack: * Context switch by full context save. 
*/ processor = current_processor(); - processor->active_thread = new_thread; - processor->current_pri = new_thread->sched_pri; - new_thread->last_processor = processor; - ast_context(new_thread); - assert(thread_runnable(new_thread)); - thread_unlock(new_thread); + processor->active_thread = thread; + processor->current_pri = thread->sched_pri; + processor->current_thmode = thread->sched_mode; + if (thread->last_processor != processor && thread->last_processor != NULL) { + if (thread->last_processor->processor_set != processor->processor_set) + thread->ps_switch++; + thread->p_switch++; + } + thread->last_processor = processor; + thread->c_switch++; + ast_context(thread); + thread_unlock(thread); - counter_always(c_thread_invoke_csw++); - current_task()->csw++; + counter(c_thread_invoke_csw++); - assert(old_thread->runq == RUN_QUEUE_NULL); - old_thread->reason = reason; + assert(self->runq == PROCESSOR_NULL); + self->reason = reason; processor->last_dispatch = mach_absolute_time(); - timer_event((uint32_t)processor->last_dispatch, &new_thread->system_timer); + self->last_run_time = processor->last_dispatch; + thread_timer_event(processor->last_dispatch, &thread->system_timer); + PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); - thread_done(old_thread, new_thread, processor); + if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0); + } + + DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info); + + SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri); /* * This is where we actually switch register context, - * and address space if required. Control will not - * return here immediately. + * and address space if required. We will next run + * as a result of a subsequent context switch. */ - prev_thread = machine_switch_context(old_thread, continuation, new_thread); + thread = machine_switch_context(self, continuation, thread); + TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread); - /* - * We are still old_thread, possibly on a different processor, - * and new_thread is now stale. - */ - thread_begin(old_thread, old_thread->last_processor); + DTRACE_SCHED(on__cpu); /* - * Now dispatch the thread which resumed us. + * We have been resumed and are set to run. */ - thread_dispatch(prev_thread); + thread_dispatch(thread, self); if (continuation) { - funnel_refunnel_check(old_thread, 3); + self->continuation = self->parameter = NULL; + + funnel_refunnel_check(self, 3); (void) spllo(); - call_continuation(continuation, parameter, old_thread->wait_result); + call_continuation(continuation, parameter, self->wait_result); /*NOTREACHED*/ } @@ -1455,184 +2167,178 @@ need_stack: } /* - * thread_done: + * thread_dispatch: * - * Perform calculations for thread - * finishing execution on the current processor. + * Handle threads at context switch. Re-dispatch other thread + * if still running, otherwise update run state and perform + * special actions. Update quantum for other thread and begin + * the quantum for ourselves. * * Called at splsched. 
*/ void -thread_done( - thread_t old_thread, - thread_t new_thread, - processor_t processor) +thread_dispatch( + thread_t thread, + thread_t self) { - if (!(old_thread->state & TH_IDLE)) { + processor_t processor = self->last_processor; + + if (thread != THREAD_NULL) { /* - * Compute remainder of current quantum. + * If blocked at a continuation, discard + * the stack. */ - if ( first_timeslice(processor) && - processor->quantum_end > processor->last_dispatch ) - old_thread->current_quantum = - (processor->quantum_end - processor->last_dispatch); - else - old_thread->current_quantum = 0; + if (thread->continuation != NULL && thread->kernel_stack != 0) + stack_free(thread); + + if (!(thread->state & TH_IDLE)) { + wake_lock(thread); + thread_lock(thread); - if (old_thread->sched_mode & TH_MODE_REALTIME) { /* - * Cancel the deadline if the thread has - * consumed the entire quantum. + * Compute remainder of current quantum. */ - if (old_thread->current_quantum == 0) { - old_thread->realtime.deadline = UINT64_MAX; - old_thread->reason |= AST_QUANTUM; + if ( first_timeslice(processor) && + processor->quantum_end > processor->last_dispatch ) + thread->current_quantum = (uint32_t)(processor->quantum_end - processor->last_dispatch); + else + thread->current_quantum = 0; + + if (thread->sched_mode == TH_MODE_REALTIME) { + /* + * Cancel the deadline if the thread has + * consumed the entire quantum. + */ + if (thread->current_quantum == 0) { + thread->realtime.deadline = UINT64_MAX; + thread->reason |= AST_QUANTUM; + } + } else { +#if defined(CONFIG_SCHED_TRADITIONAL) + /* + * For non-realtime threads treat a tiny + * remaining quantum as an expired quantum + * but include what's left next time. + */ + if (thread->current_quantum < min_std_quantum) { + thread->reason |= AST_QUANTUM; + thread->current_quantum += std_quantum; + } +#endif } - } - else { + /* - * For non-realtime threads treat a tiny - * remaining quantum as an expired quantum - * but include what's left next time. + * If we are doing a direct handoff then + * take the remainder of the quantum. */ - if (old_thread->current_quantum < min_std_quantum) { - old_thread->reason |= AST_QUANTUM; - old_thread->current_quantum += std_quantum; + if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) { + self->current_quantum = thread->current_quantum; + thread->reason |= AST_QUANTUM; + thread->current_quantum = 0; } - } - /* - * If we are doing a direct handoff then - * give the remainder of our quantum to - * the next thread. - */ - if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) { - new_thread->current_quantum = old_thread->current_quantum; - old_thread->reason |= AST_QUANTUM; - old_thread->current_quantum = 0; - } + thread->computation_metered += (processor->last_dispatch - thread->computation_epoch); - old_thread->last_switch = processor->last_dispatch; + if (!(thread->state & TH_WAIT)) { + /* + * Still running. + */ + if (thread->reason & AST_QUANTUM) + thread_setrun(thread, SCHED_TAILQ); + else + if (thread->reason & AST_PREEMPT) + thread_setrun(thread, SCHED_HEADQ); + else + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); - old_thread->computation_metered += - (old_thread->last_switch - old_thread->computation_epoch); - } -} + thread->reason = AST_NONE; -/* - * thread_begin: - * - * Set up for thread beginning execution on - * the current processor. - * - * Called at splsched. 
- */ -void -thread_begin( - thread_t thread, - processor_t processor) -{ - if (!(thread->state & TH_IDLE)) { - /* - * Give the thread a new quantum - * if none remaining. - */ - if (thread->current_quantum == 0) - thread_quantum_init(thread); - - /* - * Set up quantum timer and timeslice. - */ - processor->quantum_end = - (processor->last_dispatch + thread->current_quantum); - timer_call_enter1(&processor->quantum_timer, - thread, processor->quantum_end); + thread_unlock(thread); + wake_unlock(thread); + } + else { + /* + * Waiting. + */ + boolean_t should_terminate = FALSE; - processor_timeslice_setup(processor, thread); + /* Only the first call to thread_dispatch + * after explicit termination should add + * the thread to the termination queue + */ + if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) { + should_terminate = TRUE; + thread->state |= TH_TERMINATE2; + } - thread->last_switch = processor->last_dispatch; + thread->state &= ~TH_RUN; - thread->computation_epoch = thread->last_switch; - } - else { - timer_call_cancel(&processor->quantum_timer); - processor->timeslice = 1; - } -} + if (thread->sched_mode == TH_MODE_TIMESHARE) + sched_share_decr(); + sched_run_decr(); -/* - * thread_dispatch: - * - * Handle previous thread at context switch. Re-dispatch - * if still running, otherwise update run state and perform - * special actions. - * - * Called at splsched. - */ -void -thread_dispatch( - register thread_t thread) -{ - /* - * If blocked at a continuation, discard - * the stack. - */ -#ifndef i386 - if (thread->continuation != NULL && thread->kernel_stack) - stack_free(thread); -#endif + (*thread->sched_call)(SCHED_CALL_BLOCK, thread); - if (!(thread->state & TH_IDLE)) { - wake_lock(thread); - thread_lock(thread); + if (thread->wake_active) { + thread->wake_active = FALSE; + thread_unlock(thread); - if (!(thread->state & TH_WAIT)) { - /* - * Still running. - */ - if (thread->reason & AST_QUANTUM) - thread_setrun(thread, SCHED_TAILQ); - else - if (thread->reason & AST_PREEMPT) - thread_setrun(thread, SCHED_HEADQ); - else - thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); + thread_wakeup(&thread->wake_active); + } + else + thread_unlock(thread); - thread->reason = AST_NONE; + wake_unlock(thread); - thread_unlock(thread); - wake_unlock(thread); + if (should_terminate) + thread_terminate_enqueue(thread); + } } - else { - boolean_t wake; - - /* - * Waiting. - */ - thread->state &= ~TH_RUN; + } - wake = thread->wake_active; - thread->wake_active = FALSE; + if (!(self->state & TH_IDLE)) { - if (thread->sched_mode & TH_MODE_TIMESHARE) - pset_share_decr(thread->processor_set); - pset_run_decr(thread->processor_set); + if (self->sched_mode == TH_MODE_REALTIME) + thread_tell_urgency(THREAD_URGENCY_REAL_TIME, self->realtime.period, self->realtime.deadline); + /* Identify non-promoted threads which have requested a + * "background" priority. + */ + else if ((self->sched_pri <= MAXPRI_THROTTLE) && + (self->priority <= MAXPRI_THROTTLE)) + thread_tell_urgency(THREAD_URGENCY_BACKGROUND, self->sched_pri, self->priority); + else + thread_tell_urgency(THREAD_URGENCY_NORMAL, self->sched_pri, self->priority); + /* + * Get a new quantum if none remaining. + */ + if (self->current_quantum == 0) { + thread_quantum_init(self); + self->last_quantum_refill_time = processor->last_dispatch; + } - thread_unlock(thread); - wake_unlock(thread); + /* + * Set up quantum timer and timeslice. 
+ */ + processor->quantum_end = (processor->last_dispatch + self->current_quantum); + timer_call_enter1(&processor->quantum_timer, self, processor->quantum_end, TIMER_CALL_CRITICAL); - if (thread->options & TH_OPT_CALLOUT) - call_thread_block(); + processor->timeslice = 1; - if (wake) - thread_wakeup((event_t)&thread->wake_active); + self->computation_epoch = processor->last_dispatch; + } + else { + timer_call_cancel(&processor->quantum_timer); + processor->timeslice = 0; - if (thread->state & TH_TERMINATE) - thread_terminate_enqueue(thread); - } + thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0); } } +#include + +uint32_t kdebug_thread_block = 0; + + /* * thread_block_reason: * @@ -1666,16 +2372,6 @@ thread_block_reason( processor = current_processor(); - /* - * Delay switching to the idle thread under certain conditions. - */ - if (s != FALSE && (self->state & (TH_IDLE|TH_TERMINATE|TH_WAIT)) == TH_WAIT) { - if ( processor->processor_set->processor_count > 1 && - processor->processor_set->runq.count == 0 && - processor->runq.count == 0 ) - processor = delay_idle(processor, self); - } - /* If we're explicitly yielding, force a subsequent quantum */ if (reason & AST_YIELD) processor->timeslice = 0; @@ -1686,16 +2382,20 @@ thread_block_reason( self->continuation = continuation; self->parameter = parameter; - thread_lock(self); - new_thread = thread_select(processor); - assert(new_thread && thread_runnable(new_thread)); - thread_unlock(self); - while (!thread_invoke(self, new_thread, reason)) { + if (__improbable(kdebug_thread_block && kdebug_enable && self->state != TH_RUN)) { + uint32_t bt[8]; + + OSBacktrace((void **)&bt[0], 8); + + KERNEL_DEBUG_CONSTANT(0x140004c | DBG_FUNC_START, bt[0], bt[1], bt[2], bt[3], 0); + KERNEL_DEBUG_CONSTANT(0x140004c | DBG_FUNC_END, bt[4], bt[5], bt[6], bt[7], 0); + } + + do { thread_lock(self); - new_thread = thread_select(processor); - assert(new_thread && thread_runnable(new_thread)); + new_thread = thread_select(self, processor); thread_unlock(self); - } + } while (!thread_invoke(self, new_thread, reason)); funnel_refunnel_check(self, 5); splx(s); @@ -1748,10 +2448,10 @@ thread_run( self->parameter = parameter; while (!thread_invoke(self, new_thread, handoff)) { - register processor_t processor = current_processor(); + processor_t processor = current_processor(); thread_lock(self); - new_thread = thread_select(processor); + new_thread = thread_select(self, processor); thread_unlock(self); handoff = AST_NONE; } @@ -1769,101 +2469,320 @@ thread_run( */ void thread_continue( - register thread_t old_thread) + register thread_t thread) { register thread_t self = current_thread(); register thread_continue_t continuation; register void *parameter; - + + DTRACE_SCHED(on__cpu); + continuation = self->continuation; - self->continuation = NULL; parameter = self->parameter; - self->parameter = NULL; - thread_begin(self, self->last_processor); + thread_dispatch(thread, self); - if (old_thread != THREAD_NULL) - thread_dispatch(old_thread); + self->continuation = self->parameter = NULL; funnel_refunnel_check(self, 4); - if (old_thread != THREAD_NULL) + if (thread != THREAD_NULL) (void)spllo(); + TLOG(1, "thread_continue: calling call_continuation \n"); call_continuation(continuation, parameter, self->wait_result); /*NOTREACHED*/ } +void +thread_quantum_init(thread_t thread) +{ + if (thread->sched_mode == TH_MODE_REALTIME) { + thread->current_quantum = thread->realtime.computation; + } else { + thread->current_quantum = SCHED(initial_quantum_size)(thread); + } +} + +#if 
defined(CONFIG_SCHED_TRADITIONAL) +static uint32_t +sched_traditional_initial_quantum_size(thread_t thread __unused) +{ + return std_quantum; +} + +static sched_mode_t +sched_traditional_initial_thread_sched_mode(task_t parent_task) +{ + if (parent_task == kernel_task) + return TH_MODE_FIXED; + else + return TH_MODE_TIMESHARE; +} + +static boolean_t +sched_traditional_supports_timeshare_mode(void) +{ + return TRUE; +} + +#endif /* CONFIG_SCHED_TRADITIONAL */ + /* - * Enqueue thread on run queue. Thread must be locked, - * and not already be on a run queue. Returns TRUE - * if a preemption is indicated based on the state - * of the run queue. + * run_queue_init: * - * Run queue must be locked, see run_queue_remove() - * for more info. + * Initialize a run queue before first use. */ -static boolean_t -run_queue_enqueue( - register run_queue_t rq, - register thread_t thread, - integer_t options) +void +run_queue_init( + run_queue_t rq) +{ + int i; + + rq->highq = IDLEPRI; + for (i = 0; i < NRQBM; i++) + rq->bitmap[i] = 0; + setbit(MAXPRI - IDLEPRI, rq->bitmap); + rq->urgency = rq->count = 0; + for (i = 0; i < NRQS; i++) + queue_init(&rq->queues[i]); +} + +#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) +int +sched_traditional_fairshare_runq_count(void) +{ + return fs_runq.count; +} + +uint64_t +sched_traditional_fairshare_runq_stats_count_sum(void) +{ + return fs_runq.runq_stats.count_sum; +} + +void +sched_traditional_fairshare_enqueue(thread_t thread) +{ + queue_t queue = &fs_runq.queue; + + simple_lock(&fs_lock); + + enqueue_tail(queue, (queue_entry_t)thread); + + thread->runq = FS_RUNQ; + SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, fs_runq.count); + fs_runq.count++; + + simple_unlock(&fs_lock); +} + +thread_t +sched_traditional_fairshare_dequeue(void) +{ + thread_t thread; + + simple_lock(&fs_lock); + if (fs_runq.count > 0) { + thread = (thread_t)dequeue_head(&fs_runq.queue); + + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, fs_runq.count); + fs_runq.count--; + + simple_unlock(&fs_lock); + + return (thread); + } + simple_unlock(&fs_lock); + + return THREAD_NULL; +} + +boolean_t +sched_traditional_fairshare_queue_remove(thread_t thread) { - register int whichq = thread->sched_pri; - register queue_t queue = &rq->queues[whichq]; - boolean_t result = FALSE; + queue_t q; + + simple_lock(&fs_lock); + q = &fs_runq.queue; - assert(whichq >= MINPRI && whichq <= MAXPRI); + if (FS_RUNQ == thread->runq) { + remqueue((queue_entry_t)thread); + SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, fs_runq.count); + fs_runq.count--; + + thread->runq = PROCESSOR_NULL; + simple_unlock(&fs_lock); + return (TRUE); + } + else { + /* + * The thread left the run queue before we could + * lock the run queue. + */ + assert(thread->runq == PROCESSOR_NULL); + simple_unlock(&fs_lock); + return (FALSE); + } +} + +#endif /* defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) */ + +/* + * run_queue_dequeue: + * + * Perform a dequeue operation on a run queue, + * and return the resulting thread. + * + * The run queue must be locked (see thread_run_queue_remove() + * for more info), and not empty. 
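+ *
+ * For illustration, the bitmap convention maintained here (sketch,
+ * in terms of the setbit/clrbit/ffsbit bit-string helpers used below):
+ *
+ *	setbit(MAXPRI - pri, rq->bitmap);	level pri became non-empty
+ *	clrbit(MAXPRI - pri, rq->bitmap);	level pri became empty
+ *	rq->highq = MAXPRI - ffsbit(rq->bitmap);
+ *
+ * Bits are stored inverted (MAXPRI - pri) so that ffsbit, which
+ * locates the lowest set bit, yields the highest runnable priority.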
+ */ +thread_t +run_queue_dequeue( + run_queue_t rq, + integer_t options) +{ + thread_t thread; + queue_t queue = rq->queues + rq->highq; + + if (options & SCHED_HEADQ) { + thread = (thread_t)dequeue_head(queue); + } + else { + thread = (thread_t)dequeue_tail(queue); + } - assert(thread->runq == RUN_QUEUE_NULL); + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); + rq->count--; + if (SCHED(priority_is_urgent)(rq->highq)) { + rq->urgency--; assert(rq->urgency >= 0); + } if (queue_empty(queue)) { - enqueue_tail(queue, (queue_entry_t)thread); + if (rq->highq != IDLEPRI) + clrbit(MAXPRI - rq->highq, rq->bitmap); + rq->highq = MAXPRI - ffsbit(rq->bitmap); + } - setbit(MAXPRI - whichq, rq->bitmap); - if (whichq > rq->highq) { - rq->highq = whichq; + return (thread); +} + +/* + * run_queue_enqueue: + * + * Perform a enqueue operation on a run queue. + * + * The run queue must be locked (see thread_run_queue_remove() + * for more info). + */ +boolean_t +run_queue_enqueue( + run_queue_t rq, + thread_t thread, + integer_t options) +{ + queue_t queue = rq->queues + thread->sched_pri; + boolean_t result = FALSE; + + if (queue_empty(queue)) { + enqueue_tail(queue, (queue_entry_t)thread); + + setbit(MAXPRI - thread->sched_pri, rq->bitmap); + if (thread->sched_pri > rq->highq) { + rq->highq = thread->sched_pri; result = TRUE; } } else - if (options & SCHED_HEADQ) - enqueue_head(queue, (queue_entry_t)thread); - else - enqueue_tail(queue, (queue_entry_t)thread); - - thread->runq = rq; - if (thread->sched_mode & TH_MODE_PREEMPT) + if (options & SCHED_TAILQ) + enqueue_tail(queue, (queue_entry_t)thread); + else + enqueue_head(queue, (queue_entry_t)thread); + + if (SCHED(priority_is_urgent)(thread->sched_pri)) rq->urgency++; + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); rq->count++; - + return (result); + +} + +/* + * run_queue_remove: + * + * Remove a specific thread from a runqueue. + * + * The run queue must be locked. + */ +void +run_queue_remove( + run_queue_t rq, + thread_t thread) +{ + + remqueue((queue_entry_t)thread); + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); + rq->count--; + if (SCHED(priority_is_urgent)(thread->sched_pri)) { + rq->urgency--; assert(rq->urgency >= 0); + } + + if (queue_empty(rq->queues + thread->sched_pri)) { + /* update run queue status */ + if (thread->sched_pri != IDLEPRI) + clrbit(MAXPRI - thread->sched_pri, rq->bitmap); + rq->highq = MAXPRI - ffsbit(rq->bitmap); + } + + thread->runq = PROCESSOR_NULL; } /* - * Enqueue a thread for realtime execution, similar - * to above. Handles preemption directly. + * fairshare_setrun: + * + * Dispatch a thread for round-robin execution. + * + * Thread must be locked. Associated pset must + * be locked, and is returned unlocked. */ static void -realtime_schedule_insert( - register processor_set_t pset, - register thread_t thread) +fairshare_setrun( + processor_t processor, + thread_t thread) +{ + processor_set_t pset = processor->processor_set; + + thread->chosen_processor = processor; + + SCHED(fairshare_enqueue)(thread); + + if (processor != current_processor()) + machine_signal_idle(processor); + + pset_unlock(pset); + +} + +/* + * realtime_queue_insert: + * + * Enqueue a thread for realtime execution. 
+ */ +static boolean_t +realtime_queue_insert( + thread_t thread) { - register run_queue_t rq = &pset->runq; - register int whichq = thread->sched_pri; - register queue_t queue = &rq->queues[whichq]; - uint64_t deadline = thread->realtime.deadline; - boolean_t try_preempt = FALSE; + queue_t queue = &rt_runq.queue; + uint64_t deadline = thread->realtime.deadline; + boolean_t preempt = FALSE; - assert(whichq >= BASEPRI_REALTIME && whichq <= MAXPRI); + simple_lock(&rt_lock); - assert(thread->runq == RUN_QUEUE_NULL); if (queue_empty(queue)) { enqueue_tail(queue, (queue_entry_t)thread); - - setbit(MAXPRI - whichq, rq->bitmap); - if (whichq > rq->highq) - rq->highq = whichq; - try_preempt = TRUE; + preempt = TRUE; } else { register thread_t entry = (thread_t)queue_first(queue); @@ -1879,365 +2798,768 @@ realtime_schedule_insert( } if ((queue_entry_t)entry == queue) - try_preempt = TRUE; + preempt = TRUE; insque((queue_entry_t)thread, (queue_entry_t)entry); } - thread->runq = rq; - assert(thread->sched_mode & TH_MODE_PREEMPT); - rq->count++; rq->urgency++; - - if (try_preempt) { - register processor_t processor; - - processor = current_processor(); - if ( pset == processor->processor_set && - (thread->sched_pri > processor->current_pri || - deadline < processor->deadline ) ) { - dispatch_counts.realtime_self++; - simple_unlock(&pset->sched_lock); - - ast_on(AST_PREEMPT | AST_URGENT); - return; - } - - if ( pset->processor_count > 1 || - pset != processor->processor_set ) { - processor_t myprocessor, lastprocessor; - queue_entry_t next; - - myprocessor = processor; - processor = thread->last_processor; - if ( processor != myprocessor && - processor != PROCESSOR_NULL && - processor->processor_set == pset && - processor->state == PROCESSOR_RUNNING && - (thread->sched_pri > processor->current_pri || - deadline < processor->deadline ) ) { - dispatch_counts.realtime_last++; - cause_ast_check(processor); - simple_unlock(&pset->sched_lock); - return; - } - - lastprocessor = processor; - queue = &pset->active_queue; - processor = (processor_t)queue_first(queue); - while (!queue_end(queue, (queue_entry_t)processor)) { - next = queue_next((queue_entry_t)processor); - - if ( processor != myprocessor && - processor != lastprocessor && - (thread->sched_pri > processor->current_pri || - deadline < processor->deadline ) ) { - if (!queue_end(queue, next)) { - remqueue(queue, (queue_entry_t)processor); - enqueue_tail(queue, (queue_entry_t)processor); - } - dispatch_counts.realtime_other++; - cause_ast_check(processor); - simple_unlock(&pset->sched_lock); - return; - } + thread->runq = RT_RUNQ; + SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count); + rt_runq.count++; - processor = (processor_t)next; - } - } - } + simple_unlock(&rt_lock); - simple_unlock(&pset->sched_lock); + return (preempt); } /* - * thread_setrun: + * realtime_setrun: * - * Dispatch thread for execution, directly onto an idle - * processor if possible. Else put on appropriate run - * queue. (local if bound, else processor set) + * Dispatch a thread for realtime execution. * - * Thread must be locked. + * Thread must be locked. Associated pset must + * be locked, and is returned unlocked. */ -void -thread_setrun( - register thread_t new_thread, - integer_t options) +static void +realtime_setrun( + processor_t processor, + thread_t thread) { - register processor_t processor; - register processor_set_t pset; - register thread_t thread; - ast_t preempt = (options & SCHED_PREEMPT)? 
- AST_PREEMPT: AST_NONE; + processor_set_t pset = processor->processor_set; - assert(thread_runnable(new_thread)); - - /* - * Update priority if needed. - */ - if (new_thread->sched_stamp != sched_tick) - update_priority(new_thread); + thread->chosen_processor = processor; /* - * Check for urgent preemption. + * Dispatch directly onto idle processor. */ - if (new_thread->sched_mode & TH_MODE_PREEMPT) - preempt = (AST_PREEMPT | AST_URGENT); + if ( (thread->bound_processor == processor) + && processor->state == PROCESSOR_IDLE) { + remqueue((queue_entry_t)processor); + enqueue_tail(&pset->active_queue, (queue_entry_t)processor); - assert(new_thread->runq == RUN_QUEUE_NULL); + processor->next_thread = thread; + processor->deadline = thread->realtime.deadline; + processor->state = PROCESSOR_DISPATCHING; + pset_unlock(pset); - if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) { - /* - * First try to dispatch on - * the last processor. - */ - pset = new_thread->processor_set; - processor = new_thread->last_processor; - if ( pset->processor_count > 1 && - processor != PROCESSOR_NULL && - processor->state == PROCESSOR_IDLE ) { - processor_lock(processor); - simple_lock(&pset->sched_lock); - if ( processor->processor_set == pset && - processor->state == PROCESSOR_IDLE ) { - remqueue(&pset->idle_queue, (queue_entry_t)processor); - pset->idle_count--; - processor->next_thread = new_thread; - if (new_thread->sched_pri >= BASEPRI_RTQUEUES) - processor->deadline = new_thread->realtime.deadline; - else - processor->deadline = UINT64_MAX; - processor->state = PROCESSOR_DISPATCHING; - dispatch_counts.idle_pset_last++; - simple_unlock(&pset->sched_lock); - processor_unlock(processor); - if (processor != current_processor()) - machine_signal_idle(processor); - return; - } - processor_unlock(processor); - } - else - simple_lock(&pset->sched_lock); + if (processor != current_processor()) + machine_signal_idle(processor); + return; + } - /* - * Next pick any idle processor - * in the processor set. - */ - if (pset->idle_count > 0) { - processor = (processor_t)dequeue_head(&pset->idle_queue); - pset->idle_count--; - processor->next_thread = new_thread; - if (new_thread->sched_pri >= BASEPRI_RTQUEUES) - processor->deadline = new_thread->realtime.deadline; - else - processor->deadline = UINT64_MAX; - processor->state = PROCESSOR_DISPATCHING; - dispatch_counts.idle_pset_any++; - simple_unlock(&pset->sched_lock); - if (processor != current_processor()) - machine_signal_idle(processor); - return; + if (realtime_queue_insert(thread)) { + int prstate = processor->state; + if (processor == current_processor()) + ast_on(AST_PREEMPT | AST_URGENT); + else if ((prstate == PROCESSOR_DISPATCHING) || (prstate == PROCESSOR_IDLE)) + machine_signal_idle(processor); + else + cause_ast_check(processor); + } + + pset_unlock(pset); +} + +#if defined(CONFIG_SCHED_TRADITIONAL) + +static boolean_t +priority_is_urgent(int priority) +{ + return testbit(priority, sched_preempt_pri) ? TRUE : FALSE; +} + +/* + * processor_enqueue: + * + * Enqueue thread on a processor run queue. Thread must be locked, + * and not already be on a run queue. + * + * Returns TRUE if a preemption is indicated based on the state + * of the run queue. + * + * The run queue must be locked (see thread_run_queue_remove() + * for more info). 
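+ *
+ * The result feeds the preemption decision in processor_setrun(),
+ * which uses the pattern:
+ *
+ *	if (!SCHED(processor_enqueue)(processor, thread, options))
+ *		preempt = AST_NONE;
+ *
+ * since run_queue_enqueue() returns TRUE only when the new thread
+ * raises the queue's highest runnable priority.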
+ */ +static boolean_t +processor_enqueue( + processor_t processor, + thread_t thread, + integer_t options) +{ + run_queue_t rq = runq_for_processor(processor); + boolean_t result; + + result = run_queue_enqueue(rq, thread, options); + thread->runq = processor; + runq_consider_incr_bound_count(processor, thread); + + return (result); +} + +#endif /* CONFIG_SCHED_TRADITIONAL */ + +/* + * processor_setrun: + * + * Dispatch a thread for execution on a + * processor. + * + * Thread must be locked. Associated pset must + * be locked, and is returned unlocked. + */ +static void +processor_setrun( + processor_t processor, + thread_t thread, + integer_t options) +{ + processor_set_t pset = processor->processor_set; + ast_t preempt; + + thread->chosen_processor = processor; + + /* + * Dispatch directly onto idle processor. + */ + if ( (SCHED(direct_dispatch_to_idle_processors) || + thread->bound_processor == processor) + && processor->state == PROCESSOR_IDLE) { + remqueue((queue_entry_t)processor); + enqueue_tail(&pset->active_queue, (queue_entry_t)processor); + + processor->next_thread = thread; + processor->deadline = UINT64_MAX; + processor->state = PROCESSOR_DISPATCHING; + pset_unlock(pset); + + if (processor != current_processor()) + machine_signal_idle(processor); + return; + } + + /* + * Set preemption mode. + */ + if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) + preempt = (AST_PREEMPT | AST_URGENT); + else if(processor->active_thread && thread_eager_preemption(processor->active_thread)) + preempt = (AST_PREEMPT | AST_URGENT); + else + if ((thread->sched_mode == TH_MODE_TIMESHARE) && thread->sched_pri < thread->priority) + preempt = AST_NONE; + else + preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE; + + if (!SCHED(processor_enqueue)(processor, thread, options)) + preempt = AST_NONE; + + if (preempt != AST_NONE) { + if (processor == current_processor()) { + if (csw_check(processor) != AST_NONE) + ast_on(preempt); + } + else + if ( processor->state == PROCESSOR_IDLE || processor->state == PROCESSOR_DISPATCHING) { + machine_signal_idle(processor); + } + else + if ( (processor->state == PROCESSOR_RUNNING || + processor->state == PROCESSOR_SHUTDOWN) && + (thread->sched_pri >= processor->current_pri || + processor->current_thmode == TH_MODE_FAIRSHARE)) { + cause_ast_check(processor); + } + } + else + if ( processor->state == PROCESSOR_SHUTDOWN && + thread->sched_pri >= processor->current_pri ) { + cause_ast_check(processor); + } + else + if ( processor->state == PROCESSOR_IDLE && + processor != current_processor() ) { + machine_signal_idle(processor); + } + + pset_unlock(pset); +} + +#if defined(CONFIG_SCHED_TRADITIONAL) + +static boolean_t +processor_queue_empty(processor_t processor) +{ + return runq_for_processor(processor)->count == 0; + +} + +static boolean_t +sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor) +{ + processor_set_t pset = processor->processor_set; + int count = runq_for_processor(processor)->count; + + /* + * The pset runq contains the count of all runnable threads + * for all processors in the pset. However, for threads that + * are bound to another processor, the current "processor" + * is not eligible to execute the thread. So we only + * include bound threads that are bound to the current + * "processor". This allows the processor to idle when the + * count of eligible threads drops to 0, even if there's + * a runnable thread bound to a different processor in the + * shared runq. 
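+ *
+ * A worked example (illustrative): with 5 threads in the shared
+ * runq, 2 of them bound to another processor and 1 bound to this
+ * one, pset_runq_bound_count is 3 and runq_bound_count is 1, so
+ *
+ *	count = 5 - 3 + 1 = 3 eligible threads;
+ *
+ * if instead only the 2 foreign-bound threads are queued,
+ * count = 2 - 2 + 0 = 0 and this processor may idle.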
+ */ + + count -= pset->pset_runq_bound_count; + count += processor->runq_bound_count; + + return count == 0; +} + +static ast_t +processor_csw_check(processor_t processor) +{ + run_queue_t runq; + + assert(processor->active_thread != NULL); + + runq = runq_for_processor(processor); + if (runq->highq > processor->current_pri) { + if (runq->urgency > 0) + return (AST_PREEMPT | AST_URGENT); + + if (processor->active_thread && thread_eager_preemption(processor->active_thread)) + return (AST_PREEMPT | AST_URGENT); + + return AST_PREEMPT; + } + + return AST_NONE; +} + +static boolean_t +processor_queue_has_priority(processor_t processor, + int priority, + boolean_t gte) +{ + if (gte) + return runq_for_processor(processor)->highq >= priority; + else + return runq_for_processor(processor)->highq > priority; +} + +static boolean_t +should_current_thread_rechoose_processor(processor_t processor) +{ + return (processor->current_pri < BASEPRI_RTQUEUES + && processor->processor_meta != PROCESSOR_META_NULL + && processor->processor_meta->primary != processor); +} + +static int +sched_traditional_processor_runq_count(processor_t processor) +{ + return runq_for_processor(processor)->count; +} + + +static uint64_t +sched_traditional_processor_runq_stats_count_sum(processor_t processor) +{ + return runq_for_processor(processor)->runq_stats.count_sum; +} + +static uint64_t +sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor) +{ + if (processor->cpu_id == processor->processor_set->cpu_set_low) + return runq_for_processor(processor)->runq_stats.count_sum; + else + return 0ULL; +} + +#endif /* CONFIG_SCHED_TRADITIONAL */ + +#define next_pset(p) (((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets) + +/* + * choose_next_pset: + * + * Return the next sibling pset containing + * available processors. + * + * Returns the original pset if none other is + * suitable. + */ +static processor_set_t +choose_next_pset( + processor_set_t pset) +{ + processor_set_t nset = pset; + + do { + nset = next_pset(nset); + } while (nset->online_processor_count < 1 && nset != pset); + + return (nset); +} + +/* + * choose_processor: + * + * Choose a processor for the thread, beginning at + * the pset. Accepts an optional processor hint in + * the pset. + * + * Returns a processor, possibly from a different pset. + * + * The thread must be locked. The pset must be locked, + * and the resulting pset is locked on return. + */ +processor_t +choose_processor( + processor_set_t pset, + processor_t processor, + thread_t thread) +{ + processor_set_t nset, cset = pset; + processor_meta_t pmeta = PROCESSOR_META_NULL; + processor_t mprocessor; + + /* + * Prefer the hinted processor, when appropriate. + */ + + if (processor != PROCESSOR_NULL) { + if (processor->processor_meta != PROCESSOR_META_NULL) + processor = processor->processor_meta->primary; + } + + mprocessor = machine_choose_processor(pset, processor); + if (mprocessor != PROCESSOR_NULL) + processor = mprocessor; + + if (processor != PROCESSOR_NULL) { + if (processor->processor_set != pset || + processor->state == PROCESSOR_INACTIVE || + processor->state == PROCESSOR_SHUTDOWN || + processor->state == PROCESSOR_OFF_LINE) + processor = PROCESSOR_NULL; + else + if (processor->state == PROCESSOR_IDLE || + ((thread->sched_pri >= BASEPRI_RTQUEUES) && + (processor->current_pri < BASEPRI_RTQUEUES))) + return (processor); + } + + /* + * Iterate through the processor sets to locate + * an appropriate processor. 
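+ *
+ * The traversal is circular; next_pset() wraps around the node's
+ * pset list, so each pset is visited at most once. A sketch of
+ * the loop skeleton used below:
+ *
+ *	do {
+ *		...try idle or lowest-priority processors in cset...
+ *		nset = next_pset(cset);
+ *		if (nset != pset) {
+ *			pset_unlock(cset);
+ *			cset = nset;
+ *			pset_lock(cset);
+ *		}
+ *	} while (nset != pset);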
+ */ + do { + /* + * Choose an idle processor. + */ + if (!queue_empty(&cset->idle_queue)) + return ((processor_t)queue_first(&cset->idle_queue)); + + if (thread->sched_pri >= BASEPRI_RTQUEUES) { + integer_t lowest_priority = MAXPRI + 1; + integer_t lowest_unpaired = MAXPRI + 1; + uint64_t furthest_deadline = 1; + processor_t lp_processor = PROCESSOR_NULL; + processor_t lp_unpaired = PROCESSOR_NULL; + processor_t fd_processor = PROCESSOR_NULL; + + lp_processor = cset->low_pri; + /* Consider hinted processor */ + if (lp_processor != PROCESSOR_NULL && + ((lp_processor->processor_meta == PROCESSOR_META_NULL) || + ((lp_processor == lp_processor->processor_meta->primary) && + !queue_empty(&lp_processor->processor_meta->idle_queue))) && + lp_processor->state != PROCESSOR_INACTIVE && + lp_processor->state != PROCESSOR_SHUTDOWN && + lp_processor->state != PROCESSOR_OFF_LINE && + (lp_processor->current_pri < thread->sched_pri)) + return lp_processor; + + processor = (processor_t)queue_first(&cset->active_queue); + while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) { + /* Discover the processor executing the + * thread with the lowest priority within + * this pset, or the one with the furthest + * deadline + */ + integer_t cpri = processor->current_pri; + if (cpri < lowest_priority) { + lowest_priority = cpri; + lp_processor = processor; + } + + if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) { + furthest_deadline = processor->deadline; + fd_processor = processor; + } - if (new_thread->sched_pri >= BASEPRI_RTQUEUES) - realtime_schedule_insert(pset, new_thread); + + if (processor->processor_meta != PROCESSOR_META_NULL && + !queue_empty(&processor->processor_meta->idle_queue)) { + if (cpri < lowest_unpaired) { + lowest_unpaired = cpri; + lp_unpaired = processor; + pmeta = processor->processor_meta; + } + else + if (pmeta == PROCESSOR_META_NULL) + pmeta = processor->processor_meta; + } + processor = (processor_t)queue_next((queue_entry_t)processor); + } + + if (thread->sched_pri > lowest_unpaired) + return lp_unpaired; + + if (pmeta != PROCESSOR_META_NULL) + return ((processor_t)queue_first(&pmeta->idle_queue)); + if (thread->sched_pri > lowest_priority) + return lp_processor; + if (thread->realtime.deadline < furthest_deadline) + return fd_processor; + + processor = PROCESSOR_NULL; + } else { - if (!run_queue_enqueue(&pset->runq, new_thread, options)) - preempt = AST_NONE; + /* + * Check any hinted processors in the processor set if available. + */ + if (cset->low_pri != PROCESSOR_NULL && cset->low_pri->state != PROCESSOR_INACTIVE && + cset->low_pri->state != PROCESSOR_SHUTDOWN && cset->low_pri->state != PROCESSOR_OFF_LINE && + (processor == PROCESSOR_NULL || + (thread->sched_pri > BASEPRI_DEFAULT && cset->low_pri->current_pri < thread->sched_pri))) { + processor = cset->low_pri; + } + else + if (cset->low_count != PROCESSOR_NULL && cset->low_count->state != PROCESSOR_INACTIVE && + cset->low_count->state != PROCESSOR_SHUTDOWN && cset->low_count->state != PROCESSOR_OFF_LINE && + (processor == PROCESSOR_NULL || (thread->sched_pri <= BASEPRI_DEFAULT && + SCHED(processor_runq_count)(cset->low_count) < SCHED(processor_runq_count)(processor)))) { + processor = cset->low_count; + } + + /* + * Otherwise, choose an available processor in the set. 
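+ *
+ * Dequeueing the head of the active queue and immediately
+ * re-queueing it at the tail rotates the ring, spreading
+ * successive unhinted choices round-robin across the set:
+ *
+ *	processor = (processor_t)dequeue_head(&cset->active_queue);
+ *	enqueue_tail(&cset->active_queue, (queue_entry_t)processor);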
+ */ + if (processor == PROCESSOR_NULL) { + processor = (processor_t)dequeue_head(&cset->active_queue); + if (processor != PROCESSOR_NULL) + enqueue_tail(&cset->active_queue, (queue_entry_t)processor); + } + + if (processor != PROCESSOR_NULL && pmeta == PROCESSOR_META_NULL) { + if (processor->processor_meta != PROCESSOR_META_NULL && + !queue_empty(&processor->processor_meta->idle_queue)) + pmeta = processor->processor_meta; + } + } + + /* + * Move onto the next processor set. + */ + nset = next_pset(cset); + + if (nset != pset) { + pset_unlock(cset); + + cset = nset; + pset_lock(cset); + } + } while (nset != pset); + + /* + * Make sure that we pick a running processor, + * and that the correct processor set is locked. + */ + do { + if (pmeta != PROCESSOR_META_NULL) { + if (cset != pmeta->primary->processor_set) { + pset_unlock(cset); + + cset = pmeta->primary->processor_set; + pset_lock(cset); + } + + if (!queue_empty(&pmeta->idle_queue)) + return ((processor_t)queue_first(&pmeta->idle_queue)); + + pmeta = PROCESSOR_META_NULL; + } + + /* + * If we haven't been able to choose a processor, + * pick the boot processor and return it. + */ + if (processor == PROCESSOR_NULL) { + processor = master_processor; /* - * Update the timesharing quanta. + * Check that the correct processor set is + * returned locked. */ - timeshare_quanta_update(pset); + if (cset != processor->processor_set) { + pset_unlock(cset); + + cset = processor->processor_set; + pset_lock(cset); + } + + return (processor); + } + + /* + * Check that the processor set for the chosen + * processor is locked. + */ + if (cset != processor->processor_set) { + pset_unlock(cset); + + cset = processor->processor_set; + pset_lock(cset); + } + + /* + * We must verify that the chosen processor is still available. + */ + if (processor->state == PROCESSOR_INACTIVE || + processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE) + processor = PROCESSOR_NULL; + } while (processor == PROCESSOR_NULL); + + return (processor); +} + +/* + * thread_setrun: + * + * Dispatch thread for execution, onto an idle + * processor or run queue, and signal a preemption + * as appropriate. + * + * Thread must be locked. + */ +void +thread_setrun( + thread_t thread, + integer_t options) +{ + processor_t processor; + processor_set_t pset; + +#if DEBUG + assert(thread_runnable(thread)); +#endif + /* + * Update priority if needed. + */ + if (SCHED(can_update_priority)(thread)) + SCHED(update_priority)(thread); + + assert(thread->runq == PROCESSOR_NULL); + + if (thread->bound_processor == PROCESSOR_NULL) { + /* + * Unbound case. + */ + if (thread->affinity_set != AFFINITY_SET_NULL) { /* - * Preempt check. + * Use affinity set policy hint. */ - if (preempt != AST_NONE) { - /* - * First try the current processor - * if it is a member of the correct - * processor set. - */ - processor = current_processor(); - thread = processor->active_thread; - if ( pset == processor->processor_set && - csw_needed(thread, processor) ) { - dispatch_counts.pset_self++; - simple_unlock(&pset->sched_lock); - - ast_on(preempt); - return; - } + pset = thread->affinity_set->aset_pset; + pset_lock(pset); - /* - * If that failed and we have other - * processors available keep trying. - */ - if ( pset->processor_count > 1 || - pset != processor->processor_set ) { - queue_t queue = &pset->active_queue; - processor_t myprocessor, lastprocessor; - queue_entry_t next; - - /* - * Next try the last processor - * dispatched on. 
- */ - myprocessor = processor; - processor = new_thread->last_processor; - if ( processor != myprocessor && - processor != PROCESSOR_NULL && - processor->processor_set == pset && - processor->state == PROCESSOR_RUNNING && - new_thread->sched_pri > processor->current_pri ) { - dispatch_counts.pset_last++; - cause_ast_check(processor); - simple_unlock(&pset->sched_lock); - return; - } + processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread); + } + else + if (thread->last_processor != PROCESSOR_NULL) { + /* + * Simple (last processor) affinity case. + */ + processor = thread->last_processor; + pset = processor->processor_set; + pset_lock(pset); + processor = SCHED(choose_processor)(pset, processor, thread); - /* - * Lastly, pick any other - * available processor. - */ - lastprocessor = processor; - processor = (processor_t)queue_first(queue); - while (!queue_end(queue, (queue_entry_t)processor)) { - next = queue_next((queue_entry_t)processor); - - if ( processor != myprocessor && - processor != lastprocessor && - new_thread->sched_pri > - processor->current_pri ) { - if (!queue_end(queue, next)) { - remqueue(queue, (queue_entry_t)processor); - enqueue_tail(queue, (queue_entry_t)processor); - } - dispatch_counts.pset_other++; - cause_ast_check(processor); - simple_unlock(&pset->sched_lock); - return; - } - - processor = (processor_t)next; - } - } + if ((thread->last_processor != processor) && (thread->last_processor != PROCESSOR_NULL)) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LPA_BROKEN)|DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)thread->last_processor->cpu_id, (uintptr_t)processor->cpu_id, thread->last_processor->state, 0); + } + + } + else { + /* + * No Affinity case: + * + * Utilize a per-task hint to spread threads + * among the available processor sets. + */ + task_t task = thread->task; - simple_unlock(&pset->sched_lock); + pset = task->pset_hint; + if (pset == PROCESSOR_SET_NULL) + pset = current_processor()->processor_set; + + pset = choose_next_pset(pset); + pset_lock(pset); + + processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread); + task->pset_hint = processor->processor_set; + } } else { - /* - * Bound, can only run on bound processor. Have to lock - * processor here because it may not be the current one. - */ - processor_lock(processor); + /* + * Bound case: + * + * Unconditionally dispatch on the processor. + */ + processor = thread->bound_processor; pset = processor->processor_set; - if (pset != PROCESSOR_SET_NULL) { - simple_lock(&pset->sched_lock); - if (processor->state == PROCESSOR_IDLE) { - remqueue(&pset->idle_queue, (queue_entry_t)processor); - pset->idle_count--; - processor->next_thread = new_thread; - processor->deadline = UINT64_MAX; - processor->state = PROCESSOR_DISPATCHING; - dispatch_counts.idle_bound++; - simple_unlock(&pset->sched_lock); - processor_unlock(processor); - if (processor != current_processor()) - machine_signal_idle(processor); - return; - } - } - - if (!run_queue_enqueue(&processor->runq, new_thread, options)) - preempt = AST_NONE; - - if (preempt != AST_NONE) { - if (processor == current_processor()) { - thread = processor->active_thread; - if (csw_needed(thread, processor)) { - dispatch_counts.bound_self++; - ast_on(preempt); + pset_lock(pset); + } + + /* + * Dispatch the thread on the chosen processor. 
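+ *
+ * Three tiers are selected below: real-time priorities (at or
+ * above BASEPRI_RTQUEUES) use the deadline-sorted global queue,
+ * fairshare threads the round-robin queue, and all others the
+ * processor run queue. A typical wakeup-side caller (illustrative
+ * sketch; the same pattern appears in processor_queue_shutdown()):
+ *
+ *	thread_lock(thread);
+ *	thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
+ *	thread_unlock(thread);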
+ */ + if (thread->sched_pri >= BASEPRI_RTQUEUES) + realtime_setrun(processor, thread); + else if (thread->sched_mode == TH_MODE_FAIRSHARE) + fairshare_setrun(processor, thread); + else + processor_setrun(processor, thread, options); +} + +processor_set_t +task_choose_pset( + task_t task) +{ + processor_set_t pset = task->pset_hint; + + if (pset != PROCESSOR_SET_NULL) + pset = choose_next_pset(pset); + + return (pset); +} + +#if defined(CONFIG_SCHED_TRADITIONAL) + +/* + * processor_queue_shutdown: + * + * Shutdown a processor run queue by + * re-dispatching non-bound threads. + * + * Associated pset must be locked, and is + * returned unlocked. + */ +void +processor_queue_shutdown( + processor_t processor) +{ + processor_set_t pset = processor->processor_set; + run_queue_t rq = runq_for_processor(processor); + queue_t queue = rq->queues + rq->highq; + int pri = rq->highq, count = rq->count; + thread_t next, thread; + queue_head_t tqueue; + + queue_init(&tqueue); + + while (count > 0) { + thread = (thread_t)queue_first(queue); + while (!queue_end(queue, (queue_entry_t)thread)) { + next = (thread_t)queue_next((queue_entry_t)thread); + + if (thread->bound_processor == PROCESSOR_NULL) { + remqueue((queue_entry_t)thread); + + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); + runq_consider_decr_bound_count(processor, thread); + rq->count--; + if (SCHED(priority_is_urgent)(pri)) { + rq->urgency--; assert(rq->urgency >= 0); } + if (queue_empty(queue)) { + if (pri != IDLEPRI) + clrbit(MAXPRI - pri, rq->bitmap); + rq->highq = MAXPRI - ffsbit(rq->bitmap); + } + + enqueue_tail(&tqueue, (queue_entry_t)thread); } - else - if ( processor->state == PROCESSOR_RUNNING && - new_thread->sched_pri > processor->current_pri ) { - dispatch_counts.bound_other++; - cause_ast_check(processor); - } + count--; + + thread = next; } - if (pset != PROCESSOR_SET_NULL) - simple_unlock(&pset->sched_lock); + queue--; pri--; + } - processor_unlock(processor); + pset_unlock(pset); + + while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) { + thread_lock(thread); + + thread_setrun(thread, SCHED_TAILQ); + + thread_unlock(thread); } } +#endif /* CONFIG_SCHED_TRADITIONAL */ + /* - * Check for a possible preemption point in - * the (current) thread. + * Check for a preemption point in + * the current context. * * Called at splsched. 
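 *
 * Callers apply the result as a pending AST, in the same pattern
 * set_sched_pri() uses below:
 *
 *	if ((preempt = csw_check(processor)) != AST_NONE)
 *		ast_on(preempt);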
*/ ast_t csw_check( - thread_t thread, processor_t processor) { - int current_pri = thread->sched_pri; - ast_t result = AST_NONE; - run_queue_t runq; - - if (first_timeslice(processor)) { - runq = &processor->processor_set->runq; - if (runq->highq >= BASEPRI_RTQUEUES) - return (AST_PREEMPT | AST_URGENT); - - if (runq->highq > current_pri) { - if (runq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); - - result |= AST_PREEMPT; - } + ast_t result = AST_NONE; - runq = &processor->runq; - if (runq->highq > current_pri) { - if (runq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); + if (first_timeslice(processor)) { + if (rt_runq.count > 0) + return (AST_PREEMPT | AST_URGENT); - result |= AST_PREEMPT; - } + result |= SCHED(processor_csw_check)(processor); + if (result & AST_URGENT) + return result; } else { - runq = &processor->processor_set->runq; - if (runq->highq >= current_pri) { - if (runq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); - - result |= AST_PREEMPT; - } - - runq = &processor->runq; - if (runq->highq >= current_pri) { - if (runq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); + if (rt_runq.count > 0 && BASEPRI_RTQUEUES >= processor->current_pri) + return (AST_PREEMPT | AST_URGENT); - result |= AST_PREEMPT; - } + result |= SCHED(processor_csw_check)(processor); + if (result & AST_URGENT) + return result; } if (result != AST_NONE) return (result); - if (thread->state & TH_SUSP) - result |= AST_PREEMPT; + if (SCHED(should_current_thread_rechoose_processor)(processor)) + return (AST_PREEMPT); + + if (machine_processor_is_inactive(processor)) + return (AST_PREEMPT); + + if (processor->active_thread->state & TH_SUSP) + return (AST_PREEMPT); - return (result); + return (AST_NONE); } /* @@ -2251,33 +3573,25 @@ csw_check( */ void set_sched_pri( - thread_t thread, - int priority) + thread_t thread, + int priority) { - register struct run_queue *rq = run_queue_remove(thread); - - if ( !(thread->sched_mode & TH_MODE_TIMESHARE) && - (priority >= BASEPRI_PREEMPT || - (thread->task_priority < MINPRI_KERNEL && - thread->task_priority >= BASEPRI_BACKGROUND && - priority > thread->task_priority) ) ) - thread->sched_mode |= TH_MODE_PREEMPT; - else - thread->sched_mode &= ~TH_MODE_PREEMPT; + boolean_t removed = thread_run_queue_remove(thread); thread->sched_pri = priority; - if (rq != RUN_QUEUE_NULL) + if (removed) thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); else if (thread->state & TH_RUN) { processor_t processor = thread->last_processor; if (thread == current_thread()) { - ast_t preempt = csw_check(thread, processor); + ast_t preempt; - if (preempt != AST_NONE) - ast_on(preempt); processor->current_pri = priority; + processor->current_thmode = thread->sched_mode; + if ((preempt = csw_check(processor)) != AST_NONE) + ast_on(preempt); } else if ( processor != PROCESSOR_NULL && @@ -2316,349 +3630,362 @@ run_queue_check( #endif /* DEBUG */ +#if defined(CONFIG_SCHED_TRADITIONAL) + +/* locks the runqueue itself */ + +static boolean_t +processor_queue_remove( + processor_t processor, + thread_t thread) +{ + void * rqlock; + run_queue_t rq; + + rqlock = &processor->processor_set->sched_lock; + rq = runq_for_processor(processor); + + simple_lock(rqlock); + if (processor == thread->runq) { + /* + * Thread is on a run queue and we have a lock on + * that run queue. + */ + runq_consider_decr_bound_count(processor, thread); + run_queue_remove(rq, thread); + } + else { + /* + * The thread left the run queue before we could + * lock the run queue. 
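+ *
+ * thread->runq is the authoritative indicator here and changes
+ * only while the corresponding queue lock is held, hence the
+ * unlocked read, lock, re-check idiom (sketch):
+ *
+ *	simple_lock(rqlock);
+ *	if (processor == thread->runq)
+ *		...still queued, safe to remove...
+ *	else
+ *		...already dequeued for dispatch, nothing to do...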
+ */ + assert(thread->runq == PROCESSOR_NULL); + processor = PROCESSOR_NULL; + } + + simple_unlock(rqlock); + + return (processor != PROCESSOR_NULL); +} + +#endif /* CONFIG_SCHED_TRADITIONAL */ + /* - * run_queue_remove: + * thread_run_queue_remove: * - * Remove a thread from its current run queue and - * return the run queue if successful. + * Remove a thread from a current run queue and + * return TRUE if successful. * * Thread must be locked. */ -run_queue_t -run_queue_remove( - thread_t thread) +boolean_t +thread_run_queue_remove( + thread_t thread) { - register run_queue_t rq = thread->runq; + processor_t processor = thread->runq; /* - * If rq is RUN_QUEUE_NULL, the thread will stay out of the + * If processor is PROCESSOR_NULL, the thread will stay out of the * run queues because the caller locked the thread. Otherwise * the thread is on a run queue, but could be chosen for dispatch * and removed. */ - if (rq != RUN_QUEUE_NULL) { - processor_set_t pset = thread->processor_set; - processor_t processor = thread->bound_processor; + if (processor != PROCESSOR_NULL) { + queue_t q; /* - * The run queues are locked by the pset scheduling - * lock, except when a processor is off-line the - * local run queue is locked by the processor lock. + * The processor run queues are locked by the + * processor set. Real-time priorities use a + * global queue with a dedicated lock. */ - if (processor != PROCESSOR_NULL) { - processor_lock(processor); - pset = processor->processor_set; + if (thread->sched_mode == TH_MODE_FAIRSHARE) { + return SCHED(fairshare_queue_remove)(thread); + } + + if (thread->sched_pri < BASEPRI_RTQUEUES) { + return SCHED(processor_queue_remove)(processor, thread); } - if (pset != PROCESSOR_SET_NULL) - simple_lock(&pset->sched_lock); + simple_lock(&rt_lock); + q = &rt_runq.queue; - if (rq == thread->runq) { + if (processor == thread->runq) { /* * Thread is on a run queue and we have a lock on * that run queue. */ - remqueue(&rq->queues[0], (queue_entry_t)thread); - rq->count--; - if (thread->sched_mode & TH_MODE_PREEMPT) - rq->urgency--; - assert(rq->urgency >= 0); - - if (queue_empty(rq->queues + thread->sched_pri)) { - /* update run queue status */ - if (thread->sched_pri != IDLEPRI) - clrbit(MAXPRI - thread->sched_pri, rq->bitmap); - rq->highq = MAXPRI - ffsbit(rq->bitmap); - } + remqueue((queue_entry_t)thread); + SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count); + rt_runq.count--; - thread->runq = RUN_QUEUE_NULL; + thread->runq = PROCESSOR_NULL; } else { /* * The thread left the run queue before we could * lock the run queue. */ - assert(thread->runq == RUN_QUEUE_NULL); - rq = RUN_QUEUE_NULL; + assert(thread->runq == PROCESSOR_NULL); + processor = PROCESSOR_NULL; } - if (pset != PROCESSOR_SET_NULL) - simple_unlock(&pset->sched_lock); - - if (processor != PROCESSOR_NULL) - processor_unlock(processor); + simple_unlock(&rt_lock); } - return (rq); + return (processor != PROCESSOR_NULL); } +#if defined(CONFIG_SCHED_TRADITIONAL) + /* - * choose_thread: + * steal_processor_thread: * - * Remove a thread to execute from the run queues - * and return it. + * Locate a thread to steal from the processor and + * return it. * - * Called with pset scheduling lock held. + * Associated pset must be locked. Returns THREAD_NULL + * on failure. 
*/ static thread_t -choose_thread( - processor_set_t pset, - processor_t processor) +steal_processor_thread( + processor_t processor) { - register run_queue_t runq; - register thread_t thread; - register queue_t q; + run_queue_t rq = runq_for_processor(processor); + queue_t queue = rq->queues + rq->highq; + int pri = rq->highq, count = rq->count; + thread_t thread; - runq = &processor->runq; - - if (runq->count > 0 && runq->highq >= pset->runq.highq) { - q = runq->queues + runq->highq; - - thread = (thread_t)q->next; - ((queue_entry_t)thread)->next->prev = q; - q->next = ((queue_entry_t)thread)->next; - thread->runq = RUN_QUEUE_NULL; - runq->count--; - if (thread->sched_mode & TH_MODE_PREEMPT) - runq->urgency--; - assert(runq->urgency >= 0); - if (queue_empty(q)) { - if (runq->highq != IDLEPRI) - clrbit(MAXPRI - runq->highq, runq->bitmap); - runq->highq = MAXPRI - ffsbit(runq->bitmap); - } + while (count > 0) { + thread = (thread_t)queue_first(queue); + while (!queue_end(queue, (queue_entry_t)thread)) { + if (thread->bound_processor == PROCESSOR_NULL) { + remqueue((queue_entry_t)thread); + + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); + runq_consider_decr_bound_count(processor, thread); + rq->count--; + if (SCHED(priority_is_urgent)(pri)) { + rq->urgency--; assert(rq->urgency >= 0); + } + if (queue_empty(queue)) { + if (pri != IDLEPRI) + clrbit(MAXPRI - pri, rq->bitmap); + rq->highq = MAXPRI - ffsbit(rq->bitmap); + } - processor->deadline = UINT64_MAX; + return (thread); + } + count--; - return (thread); + thread = (thread_t)queue_next((queue_entry_t)thread); + } + + queue--; pri--; } - runq = &pset->runq; + return (THREAD_NULL); +} + +/* + * Locate and steal a thread, beginning + * at the pset. + * + * The pset must be locked, and is returned + * unlocked. + * + * Returns the stolen thread, or THREAD_NULL on + * failure. 
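+ *
+ * Illustrative sketch of the assumed use from an idle processor's
+ * selection path:
+ *
+ *	pset_lock(pset);
+ *	thread = SCHED(steal_thread)(pset);	returns with pset unlocked
+ *	if (thread != THREAD_NULL)
+ *		...run the stolen thread...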
+ */ +static thread_t +steal_thread( + processor_set_t pset) +{ + processor_set_t nset, cset = pset; + processor_t processor; + thread_t thread; - assert(runq->count > 0); - q = runq->queues + runq->highq; + do { + processor = (processor_t)queue_first(&cset->active_queue); + while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) { + if (runq_for_processor(processor)->count > 0) { + thread = steal_processor_thread(processor); + if (thread != THREAD_NULL) { + remqueue((queue_entry_t)processor); + enqueue_tail(&cset->active_queue, (queue_entry_t)processor); - thread = (thread_t)q->next; - ((queue_entry_t)thread)->next->prev = q; - q->next = ((queue_entry_t)thread)->next; - thread->runq = RUN_QUEUE_NULL; - runq->count--; - if (runq->highq >= BASEPRI_RTQUEUES) - processor->deadline = thread->realtime.deadline; - else - processor->deadline = UINT64_MAX; - if (thread->sched_mode & TH_MODE_PREEMPT) - runq->urgency--; - assert(runq->urgency >= 0); - if (queue_empty(q)) { - if (runq->highq != IDLEPRI) - clrbit(MAXPRI - runq->highq, runq->bitmap); - runq->highq = MAXPRI - ffsbit(runq->bitmap); - } + pset_unlock(cset); - timeshare_quanta_update(pset); + return (thread); + } + } - return (thread); + processor = (processor_t)queue_next((queue_entry_t)processor); + } + + nset = next_pset(cset); + + if (nset != pset) { + pset_unlock(cset); + + cset = nset; + pset_lock(cset); + } + } while (nset != pset); + + pset_unlock(cset); + + return (THREAD_NULL); } -static processor_t -delay_idle( - processor_t processor, - thread_t self) +static thread_t steal_thread_disabled( + processor_set_t pset) { - int *gcount, *lcount; - uint64_t abstime, spin, limit; - - lcount = &processor->runq.count; - gcount = &processor->processor_set->runq.count; + pset_unlock(pset); - abstime = mach_absolute_time(); - limit = abstime + delay_idle_limit; - spin = abstime + delay_idle_spin; + return (THREAD_NULL); +} - timer_event((uint32_t)abstime, &processor->idle_thread->system_timer); +#endif /* CONFIG_SCHED_TRADITIONAL */ - self->options |= TH_OPT_DELAYIDLE; - while ( *gcount == 0 && *lcount == 0 && - (self->state & TH_WAIT) != 0 && - abstime < limit ) { - if (abstime >= spin) { - (void)spllo(); +int +thread_get_urgency(uint64_t *rt_period, uint64_t *rt_deadline) +{ + processor_t processor; + thread_t thread; + + processor = current_processor(); - (void)splsched(); - processor = current_processor(); - lcount = &processor->runq.count; - gcount = &processor->processor_set->runq.count; + thread = processor->next_thread; - abstime = mach_absolute_time(); - spin = abstime + delay_idle_spin; + if (thread != NULL) { + if (thread->sched_mode == TH_MODE_REALTIME) { - timer_event((uint32_t)abstime, &processor->idle_thread->system_timer); - } - else - abstime = mach_absolute_time(); - } + if (rt_period != NULL) + *rt_period = thread->realtime.period; + if (rt_deadline != NULL) + *rt_deadline = thread->realtime.deadline; - timer_event((uint32_t)abstime, &self->system_timer); + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_REAL_TIME, thread->realtime.period, + (thread->realtime.deadline >> 32), thread->realtime.deadline, 0); - self->options &= ~TH_OPT_DELAYIDLE; + return (THREAD_URGENCY_REAL_TIME); + } else if ((thread->sched_pri <= MAXPRI_THROTTLE) && + (thread->priority <= MAXPRI_THROTTLE)) { + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_BACKGROUND, thread->sched_pri, thread->priority, 0, 0); + return (THREAD_URGENCY_BACKGROUND); + } + else + 
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_NORMAL, 0, 0, 0, 0); - return (processor); + return (THREAD_URGENCY_NORMAL); + } + else + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_NONE, 0, 0, 0, 0); + return (THREAD_URGENCY_NONE); } -/* - * no_dispatch_count counts number of times processors go non-idle - * without being dispatched. This should be very rare. - */ -int no_dispatch_count = 0; /* - * This is the idle processor thread, which just looks for other threads - * to execute. + * This is the processor idle loop, which just looks for other threads + * to execute. Processor idle threads invoke this without supplying a + * current thread, in order to idle without an asserted wait state. + * + * Returns the next thread to execute if dispatched directly. */ -void -idle_thread(void) -{ - register processor_t processor; - register thread_t *threadp; - register int *gcount; - register int *lcount; - register thread_t new_thread; - register int state; - register processor_set_t pset; - ast_t *myast = ast_pending(); - processor = current_processor(); +#if 0 +#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__) +#else +#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0) +#endif - threadp = &processor->next_thread; - lcount = &processor->runq.count; - gcount = &processor->processor_set->runq.count; +thread_t +processor_idle( + thread_t thread, + processor_t processor) +{ + processor_set_t pset = processor->processor_set; + thread_t new_thread; + int state; + (void)splsched(); + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START, (uintptr_t)thread_tid(thread), 0, 0, 0, 0); - (void)splsched(); /* Turn interruptions off */ + SCHED_STATS_CPU_IDLE_START(processor); -#ifdef __ppc__ - pmsDown(); /* Step power down. Note: interruptions must be disabled for this call */ -#endif + timer_switch(&PROCESSOR_DATA(processor, system_state), + mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state)); + PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state); - while ( (*threadp == THREAD_NULL) && - (*gcount == 0) && (*lcount == 0) ) { + while (processor->next_thread == THREAD_NULL && SCHED(processor_queue_empty)(processor) && rt_runq.count == 0 && SCHED(fairshare_runq_count)() == 0 && + (thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) { + IDLE_KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -1, 0); - /* check for ASTs while we wait */ - if (*myast &~ (AST_SCHEDULING | AST_BSD)) { - /* no ASTs for us */ - *myast &= AST_NONE; - (void)spllo(); - } - else - machine_idle(); + machine_idle(); (void)splsched(); + + IDLE_KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -2, 0); + + if (processor->state == PROCESSOR_INACTIVE && !machine_processor_is_inactive(processor)) + break; } - /* - * This is not a switch statement to avoid the - * bounds checking code in the common case. 
- */ - pset = processor->processor_set; - simple_lock(&pset->sched_lock); + timer_switch(&PROCESSOR_DATA(processor, idle_state), + mach_absolute_time(), &PROCESSOR_DATA(processor, system_state)); + PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state); -#ifdef __ppc__ - pmsStep(0); /* Step up out of idle power, may start timer for next step */ -#endif + pset_lock(pset); state = processor->state; if (state == PROCESSOR_DISPATCHING) { /* * Common case -- cpu dispatched. */ - new_thread = *threadp; - *threadp = (volatile thread_t) THREAD_NULL; + new_thread = processor->next_thread; + processor->next_thread = THREAD_NULL; processor->state = PROCESSOR_RUNNING; - enqueue_tail(&pset->active_queue, (queue_entry_t)processor); - - if ( pset->runq.highq >= BASEPRI_RTQUEUES && - new_thread->sched_pri >= BASEPRI_RTQUEUES ) { - register run_queue_t runq = &pset->runq; - register queue_t q; - - q = runq->queues + runq->highq; - if (((thread_t)q->next)->realtime.deadline < - processor->deadline) { - thread_t thread = new_thread; - - new_thread = (thread_t)q->next; - ((queue_entry_t)new_thread)->next->prev = q; - q->next = ((queue_entry_t)new_thread)->next; - new_thread->runq = RUN_QUEUE_NULL; - processor->deadline = new_thread->realtime.deadline; - assert(new_thread->sched_mode & TH_MODE_PREEMPT); - runq->count--; runq->urgency--; - if (queue_empty(q)) { - if (runq->highq != IDLEPRI) - clrbit(MAXPRI - runq->highq, runq->bitmap); - runq->highq = MAXPRI - ffsbit(runq->bitmap); - } - dispatch_counts.missed_realtime++; - simple_unlock(&pset->sched_lock); - - thread_lock(thread); - thread_setrun(thread, SCHED_HEADQ); - thread_unlock(thread); - - counter(c_idle_thread_handoff++); - thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread); - /*NOTREACHED*/ - } - simple_unlock(&pset->sched_lock); - counter(c_idle_thread_handoff++); - thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread); - /*NOTREACHED*/ - } + if (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) || + (rt_runq.count > 0 && BASEPRI_RTQUEUES >= new_thread->sched_pri) ) { + processor->deadline = UINT64_MAX; - if ( processor->runq.highq > new_thread->sched_pri || - pset->runq.highq > new_thread->sched_pri ) { - thread_t thread = new_thread; + pset_unlock(pset); - new_thread = choose_thread(pset, processor); - dispatch_counts.missed_other++; - simple_unlock(&pset->sched_lock); + thread_lock(new_thread); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq.count, 0, 0); + thread_setrun(new_thread, SCHED_HEADQ); + thread_unlock(new_thread); - thread_lock(thread); - thread_setrun(thread, SCHED_HEADQ); - thread_unlock(thread); + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, 0, 0, 0); - counter(c_idle_thread_handoff++); - thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread); - /* NOTREACHED */ + return (THREAD_NULL); } - else { - simple_unlock(&pset->sched_lock); - counter(c_idle_thread_handoff++); - thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread); - /* NOTREACHED */ - } + pset_unlock(pset); + + KERNEL_DEBUG_CONSTANT( + MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0); + + return (new_thread); } else if (state == PROCESSOR_IDLE) { - /* - 
+
+/*
+ *	Each processor has a dedicated thread which
+ *	executes the idle loop when there is no suitable
+ *	previous context.
+ */
+void
+idle_thread(void)
+{
+	processor_t		processor = current_processor();
+	thread_t		new_thread;
+
+	new_thread = processor_idle(THREAD_NULL, processor);
+	if (new_thread != THREAD_NULL) {
+		thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
+		/*NOTREACHED*/
+	}
 
-	panic("idle_thread: state %d\n", processor->state);
+	thread_block((thread_continue_t)idle_thread);
 	/*NOTREACHED*/
 }
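A note on the control flow above: thread_run() and thread_block() are continuation-style primitives that never return; the idle thread is next resumed at the top of idle_thread() with a fresh stack, so no state survives across blocks. A minimal model of that shape, with hypothetical helpers pick_next_thread(), hand_off(), and block_as() standing in for processor_idle(), thread_run(), and thread_block():

/* Sketch only.  hand_off(next, fn) models thread_run(self, fn, NULL, next):
 * switch straight to `next`, recording fn as this thread's resume point.
 * block_as(fn) models thread_block(fn): go idle, resume at fn.  Neither
 * call returns, which is why idle_thread() needs no loop or saved state. */
typedef void (*continue_fn)(void);

static void
idle_continuation(void)
{
	void *next = pick_next_thread();		/* models processor_idle() */

	if (next != NULL)
		hand_off(next, idle_continuation);	/* direct handoff; no return */
	else
		block_as(idle_continuation);		/* wait for dispatch; no return */
}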
 
@@ -2715,8 +4065,6 @@ idle_thread_create(
 	return (KERN_SUCCESS);
 }
 
-static uint64_t		sched_tick_deadline;
-
 /*
  *	sched_startup:
  *
@@ -2730,14 +4078,16 @@ sched_startup(void)
 	kern_return_t	result;
 	thread_t		thread;
 
-	result = kernel_thread_start_priority((thread_continue_t)sched_tick_thread, NULL, MAXPRI_KERNEL, &thread);
+	result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
+	    (void *)SCHED(maintenance_continuation),
+	    MAXPRI_KERNEL, &thread);
 	if (result != KERN_SUCCESS)
 		panic("sched_startup");
 
 	thread_deallocate(thread);
 
 	/*
-	 * Yield to the sched_tick_thread while it times
+	 * Yield to the sched_init_thread while it times
 	 * a series of context switches back.  It stores
 	 * the baseline value in sched_cswtime.
 	 *
@@ -2746,20 +4096,20 @@
 	 */
 	while (sched_cswtime == 0)
 		thread_block(THREAD_CONTINUE_NULL);
+}
 
-	thread_daemon_init();
+#if defined(CONFIG_SCHED_TRADITIONAL)
 
-	thread_call_initialize();
-}
+static uint64_t		sched_tick_deadline = 0;
 
 /*
- *	sched_tick_thread:
+ *	sched_init_thread:
  *
 *	Perform periodic bookkeeping functions about ten
 *	times per second.
 */
 static void
-sched_tick_continue(void)
+sched_traditional_tick_continue(void)
 {
 	uint64_t			abstime = mach_absolute_time();
 
@@ -2776,14 +4126,44 @@ sched_tick_continue(void)
 	 */
 	thread_update_scan();
 
+	if (sched_tick_deadline == 0)
+		sched_tick_deadline = abstime;
+
 	clock_deadline_for_periodic_event(sched_tick_interval, abstime,
 						&sched_tick_deadline);
 
-	assert_wait_deadline((event_t)sched_tick_thread, THREAD_UNINT, sched_tick_deadline);
-	thread_block((thread_continue_t)sched_tick_continue);
+	assert_wait_deadline((event_t)sched_traditional_tick_continue, THREAD_UNINT, sched_tick_deadline);
+	thread_block((thread_continue_t)sched_traditional_tick_continue);
 	/*NOTREACHED*/
 }
 
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+static uint32_t
+time_individual_cswitch(void)
+{
+	uint32_t	switches = 0;
+	uint64_t	newtime, starttime;
+
+	/* Wait for absolute time to increase. */
+	starttime = mach_absolute_time();
+	do {
+		newtime = mach_absolute_time();
+	} while (newtime == starttime);
+
+	/* Measure one or more context switches until time increases again.
+	 * This ensures we get non-zero timings even if absolute time
+	 * increases very infrequently compared to the CPU clock. */
+	starttime = newtime;
+	do {
+		thread_block(THREAD_CONTINUE_NULL);
+		newtime = mach_absolute_time();
+		++switches;
+	} while (newtime == starttime);
+	/* Round up. */
+	return (uint32_t) ((newtime - starttime + switches - 1) / switches);
+}
+
 /*
 *	Time a series of context switches to determine
 *	a baseline.  Toss the high and low and return
@@ -2793,15 +4173,11 @@
 static uint32_t
 time_cswitch(void)
 {
 	uint32_t	new, hi, low, accum;
-	uint64_t	abstime;
-	int			i, tries = 7;
+	int			i, tries = 7, denom;
 
 	accum = hi = low = 0;
 	for (i = 0; i < tries; ++i) {
-		abstime = mach_absolute_time();
-		thread_block(THREAD_CONTINUE_NULL);
-
-		new = mach_absolute_time() - abstime;
+		new = time_individual_cswitch();
 
 		if (i == 0)
 			accum = hi = low = new;
@@ -2814,21 +4190,24 @@ time_cswitch(void)
 			accum += new;
 		}
 	}
-
-	return ((accum - hi - low) / (2 * (tries - 2)));
+	/* Round up. */
+	denom = 2 * (tries - 2);
+	return (accum - hi - low + denom - 1) / denom;
 }
 
 void
-sched_tick_thread(void)
+sched_init_thread(void (*continuation)(void))
 {
 	sched_cswtime = time_cswitch();
+	assert(sched_cswtime > 0);
 
-	sched_tick_deadline = mach_absolute_time();
+	continuation();
 
-	sched_tick_continue();
 	/*NOTREACHED*/
 }
 
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
 /*
 *	thread_update_scan / runq_scan:
 *
@@ -2868,7 +4247,7 @@ runq_scan(
 	while (count > 0) {
 		queue_iterate(q, thread, thread_t, links) {
 			if (		thread->sched_stamp != sched_tick		&&
-					(thread->sched_mode & TH_MODE_TIMESHARE)	) {
+					(thread->sched_mode == TH_MODE_TIMESHARE)	) {
 				if (thread_update_count == THREAD_UPDATE_SIZE)
 					return (TRUE);
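The runq_scan() test above changes from a bitwise check to an equality check; the surrounding changes suggest sched_mode became an exclusive enumeration, with the modifier bits split off into the separate sched_flags field. A hypothetical illustration of why the bitwise form no longer works:

/* Hypothetical enumeration, for illustration only.  With exclusive
 * values instead of flag bits, a bitwise test misfires. */
typedef enum {
	MODE_NONE      = 0,
	MODE_REALTIME  = 1,
	MODE_FIXED     = 2,
	MODE_TIMESHARE = 3,
} sketch_mode_t;

/* MODE_REALTIME & MODE_TIMESHARE evaluates to 1 (nonzero), so
 * `mode & MODE_TIMESHARE` would wrongly match real-time threads;
 * only `mode == MODE_TIMESHARE` selects timesharing threads. */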
@@ -2889,41 +4268,38 @@ runq_scan(
 static void
 thread_update_scan(void)
 {
-	register boolean_t		restart_needed;
-	register processor_set_t	pset = &default_pset;
-	register processor_t		processor;
-	register thread_t		thread;
-	spl_t				s;
+	boolean_t		restart_needed = FALSE;
+	processor_t		processor = processor_list;
+	processor_set_t		pset;
+	thread_t		thread;
+	spl_t			s;
 
 	do {
-		s = splsched();
-		simple_lock(&pset->sched_lock);
-		restart_needed = runq_scan(&pset->runq);
-		simple_unlock(&pset->sched_lock);
-
-		if (!restart_needed) {
-			simple_lock(&pset->sched_lock);
-			processor = (processor_t)queue_first(&pset->processors);
-			while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
-				if ((restart_needed = runq_scan(&processor->runq)) != 0)
-					break;
+		do {
+			pset = processor->processor_set;
 
-				thread = processor->idle_thread;
-				if (thread->sched_stamp != sched_tick) {
-					if (thread_update_count == THREAD_UPDATE_SIZE) {
-						restart_needed = TRUE;
-						break;
-					}
+			s = splsched();
+			pset_lock(pset);
 
-					thread_update_array[thread_update_count++] = thread;
-					thread_reference_internal(thread);
+			restart_needed = runq_scan(runq_for_processor(processor));
+
+			pset_unlock(pset);
+			splx(s);
+
+			if (restart_needed)
+				break;
+
+			thread = processor->idle_thread;
+			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
+				if (thread_update_count == THREAD_UPDATE_SIZE) {
+					restart_needed = TRUE;
+					break;
 				}
-				processor = (processor_t)queue_next(&processor->processors);
+				thread_update_array[thread_update_count++] = thread;
+				thread_reference_internal(thread);
 			}
-			simple_unlock(&pset->sched_lock);
-		}
-		splx(s);
+		} while ((processor = processor->processor_list) != NULL);
 
 		/*
 		 *	Ok, we now have a collection of candidates -- fix them.
@@ -2934,9 +4310,10 @@ thread_update_scan(void)
 			s = splsched();
 			thread_lock(thread);
-			if (	!(thread->state & (TH_WAIT|TH_SUSP))	&&
-					thread->sched_stamp != sched_tick		)
-				update_priority(thread);
+			if (	!(thread->state & (TH_WAIT))	) {
+				if (SCHED(can_update_priority)(thread))
+					SCHED(update_priority)(thread);
+			}
 			thread_unlock(thread);
 			splx(s);
 
@@ -2944,20 +4321,115 @@
 		}
 	} while (restart_needed);
 }
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+boolean_t
+thread_eager_preemption(thread_t thread)
+{
+	return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
+}
+
+void
+thread_set_eager_preempt(thread_t thread)
+{
+	spl_t x;
+	processor_t p;
+	ast_t ast = AST_NONE;
+
+	x = splsched();
+	p = current_processor();
+
+	thread_lock(thread);
+	thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
+
+	if (thread == current_thread()) {
+		thread_unlock(thread);
+
+		ast = csw_check(p);
+		if (ast != AST_NONE) {
+			(void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
+		}
+	} else {
+		p = thread->last_processor;
+
+		if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
+		    p->active_thread == thread) {
+			cause_ast_check(p);
+		}
+		thread_unlock(thread);
+	}
+
+	splx(x);
+}
+
+void
+thread_clear_eager_preempt(thread_t thread)
+{
+	spl_t x;
+
+	x = splsched();
+	thread_lock(thread);
+
+	thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
+
+	thread_unlock(thread);
+	splx(x);
+}
+/*
+ * Scheduling statistics
+ */
+void
+sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
+{
+	struct processor_sched_statistics *stats;
+	boolean_t to_realtime = FALSE;
+
+	stats = &processor->processor_data.sched_stats;
+	stats->csw_count++;
+
+	if (otherpri >= BASEPRI_REALTIME) {
+		stats->rt_sched_count++;
+		to_realtime = TRUE;
+	}
+
+	if ((reasons & AST_PREEMPT) != 0) {
+		stats->preempt_count++;
+
+		if (selfpri >= BASEPRI_REALTIME) {
+			stats->preempted_rt_count++;
+		}
+
+		if (to_realtime) {
+			stats->preempted_by_rt_count++;
+		}
+	}
+}
+
+void
+sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
+{
+	uint64_t timestamp = mach_absolute_time();
+
+	stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
+	stats->last_change_timestamp = timestamp;
+}
+
 /*
- * Just in case someone doesn't use the macro
+ *	For calls from assembly code
  */
-#undef thread_wakeup
+#undef thread_wakeup
 void
 thread_wakeup(
-	event_t		x);
+	event_t 	x);
 
 void
 thread_wakeup(
-	event_t		x)
+	event_t 	x)
 {
-	thread_wakeup_with_result(x, THREAD_AWAKENED);
+	thread_wakeup_with_result(x, THREAD_AWAKENED);
 }
 
 boolean_t
@@ -2990,9 +4462,9 @@ db_sched(void)
 
 #if	MACH_COUNTERS
 	iprintf("Thread block: calls %d\n",
 		c_thread_block_calls);
-	iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
+	iprintf("Idle thread:\n\thandoff %d block %d\n",
 		c_idle_thread_handoff,
-		c_idle_thread_block, no_dispatch_count);
+		c_idle_thread_block);
 	iprintf("Sched thread blocks: %d\n",
 		c_sched_thread_block);
 #endif	/* MACH_COUNTERS */
 	db_indent -= 2;
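The time-weighted sum kept by sched_stats_handle_runq_change() above integrates run-queue depth over time: each update adds (depth x time spent at that depth). Dividing the sum by the observation window then yields the mean depth. A self-contained sketch of that consumption step (the helper name and the x100 fixed-point scaling are illustrative, not from the source):

#include <assert.h>
#include <stdint.h>

/* Mean run-queue depth over [t0, t1], scaled by 100 to keep two
 * decimal digits in integer arithmetic.  count_sum is the integral
 * of depth over time, as accumulated by the handler above. */
static uint64_t
mean_depth_x100(uint64_t count_sum, uint64_t t0, uint64_t t1)
{
	return (count_sum * 100) / (t1 - t0);
}

int main(void)
{
	/* Depth 3 for 40 ticks, then depth 1 for 60 ticks:
	 * count_sum = 3*40 + 1*60 = 180 over a 100-tick window,
	 * i.e. a mean depth of 1.80. */
	assert(mean_depth_x100(3*40 + 1*60, 0, 100) == 180);
	return 0;
}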