X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..e2d2fc5c71f7d145cba7267989251af45e3bb5ba:/osfmk/kern/sched_prim.c diff --git a/osfmk/kern/sched_prim.c b/osfmk/kern/sched_prim.c index 0cbde484c..d7b959249 100644 --- a/osfmk/kern/sched_prim.c +++ b/osfmk/kern/sched_prim.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. All rights reserved. + * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -73,10 +73,12 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -101,14 +103,22 @@ #include #include +#include + #include #include -struct run_queue rt_runq; +struct rt_queue rt_runq; #define RT_RUNQ ((processor_t)-1) decl_simple_lock_data(static,rt_lock); +#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) +static struct fairshare_queue fs_runq; +#define FS_RUNQ ((processor_t)-2) +decl_simple_lock_data(static,fs_lock); +#endif + #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */ int default_preemption_rate = DEFAULT_PREEMPTION_RATE; @@ -121,52 +131,203 @@ int max_poll_quanta = MAX_POLL_QUANTA; #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */ int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT; -uint64_t max_unsafe_computation; -uint32_t sched_safe_duration; uint64_t max_poll_computation; +uint64_t max_unsafe_computation; +uint64_t sched_safe_duration; + +#if defined(CONFIG_SCHED_TRADITIONAL) + uint32_t std_quantum; uint32_t min_std_quantum; uint32_t std_quantum_us; +#endif /* CONFIG_SCHED_TRADITIONAL */ + +uint32_t thread_depress_time; +uint32_t default_timeshare_computation; +uint32_t default_timeshare_constraint; + uint32_t max_rt_quantum; uint32_t min_rt_quantum; uint32_t sched_cswtime; +#if defined(CONFIG_SCHED_TRADITIONAL) + unsigned sched_tick; uint32_t sched_tick_interval; uint32_t sched_pri_shift = INT8_MAX; uint32_t sched_fixed_shift; +static boolean_t sched_traditional_use_pset_runqueue = FALSE; + +__attribute__((always_inline)) +static inline run_queue_t runq_for_processor(processor_t processor) +{ + if (sched_traditional_use_pset_runqueue) + return &processor->processor_set->pset_runq; + else + return &processor->runq; +} + +__attribute__((always_inline)) +static inline void runq_consider_incr_bound_count(processor_t processor, thread_t thread) +{ + if (thread->bound_processor == PROCESSOR_NULL) + return; + + assert(thread->bound_processor == processor); + + if (sched_traditional_use_pset_runqueue) + processor->processor_set->pset_runq_bound_count++; + + processor->runq_bound_count++; +} + +__attribute__((always_inline)) +static inline void runq_consider_decr_bound_count(processor_t processor, thread_t thread) +{ + if (thread->bound_processor == PROCESSOR_NULL) + return; + + assert(thread->bound_processor == processor); + + if (sched_traditional_use_pset_runqueue) + processor->processor_set->pset_runq_bound_count--; + + processor->runq_bound_count--; +} + +#endif /* CONFIG_SCHED_TRADITIONAL */ + +uint64_t sched_one_second_interval; + uint32_t sched_run_count, sched_share_count; uint32_t sched_load_average, sched_mach_factor; -void (*pm_tick_callout)(void) = NULL; - /* Forwards */ -void wait_queues_init(void) __attribute__((section("__TEXT, initcode"))); + +#if defined(CONFIG_SCHED_TRADITIONAL) static void load_shift_init(void) __attribute__((section("__TEXT, initcode"))); +static void preempt_pri_init(void) __attribute__((section("__TEXT, 
initcode"))); -static thread_t thread_select_idle( +#endif /* CONFIG_SCHED_TRADITIONAL */ + +static thread_t thread_select( thread_t thread, processor_t processor); -static thread_t processor_idle( +#if CONFIG_SCHED_IDLE_IN_PLACE +static thread_t thread_select_idle( thread_t thread, processor_t processor); +#endif -static thread_t choose_thread( +thread_t processor_idle( + thread_t thread, processor_t processor); +#if defined(CONFIG_SCHED_TRADITIONAL) + static thread_t steal_thread( + processor_set_t pset); + +static thread_t steal_thread_disabled( + processor_set_t pset) __attribute__((unused)); + + +static thread_t steal_processor_thread( processor_t processor); static void thread_update_scan(void); +static void processor_setrun( + processor_t processor, + thread_t thread, + integer_t options); + +static boolean_t +processor_enqueue( + processor_t processor, + thread_t thread, + integer_t options); + +static boolean_t +processor_queue_remove( + processor_t processor, + thread_t thread); + +static boolean_t processor_queue_empty(processor_t processor); + +static boolean_t priority_is_urgent(int priority); + +static ast_t processor_csw_check(processor_t processor); + +static boolean_t processor_queue_has_priority(processor_t processor, + int priority, + boolean_t gte); + +static boolean_t should_current_thread_rechoose_processor(processor_t processor); + +static int sched_traditional_processor_runq_count(processor_t processor); + +static boolean_t sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor); + +static uint64_t sched_traditional_processor_runq_stats_count_sum(processor_t processor); + +static uint64_t sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor); +#endif + + +#if defined(CONFIG_SCHED_TRADITIONAL) + +static void +sched_traditional_init(void); + +static void +sched_traditional_timebase_init(void); + +static void +sched_traditional_processor_init(processor_t processor); + +static void +sched_traditional_pset_init(processor_set_t pset); + +static void +sched_traditional_with_pset_runqueue_init(void); + +#endif + +static void +sched_realtime_init(void) __attribute__((section("__TEXT, initcode"))); + +static void +sched_realtime_timebase_init(void); + +#if defined(CONFIG_SCHED_TRADITIONAL) +static void +sched_traditional_tick_continue(void); + +static uint32_t +sched_traditional_initial_quantum_size(thread_t thread); + +static sched_mode_t +sched_traditional_initial_thread_sched_mode(task_t parent_task); + +static boolean_t +sched_traditional_supports_timeshare_mode(void); + +static thread_t +sched_traditional_choose_thread( + processor_t processor, + int priority); + +#endif + #if DEBUG extern int debug_task; #define TLOG(a, fmt, args...) 
if(debug_task & a) kprintf(fmt, ## args) @@ -181,8 +342,6 @@ boolean_t thread_runnable( #endif /*DEBUG*/ - - /* * State machine * @@ -209,43 +368,221 @@ boolean_t thread_runnable( * */ +#if defined(CONFIG_SCHED_TRADITIONAL) +int8_t sched_load_shifts[NRQS]; +int sched_preempt_pri[NRQBM]; +#endif + + +#if defined(CONFIG_SCHED_TRADITIONAL) + +const struct sched_dispatch_table sched_traditional_dispatch = { + sched_traditional_init, + sched_traditional_timebase_init, + sched_traditional_processor_init, + sched_traditional_pset_init, + sched_traditional_tick_continue, + sched_traditional_choose_thread, + steal_thread, + compute_priority, + choose_processor, + processor_enqueue, + processor_queue_shutdown, + processor_queue_remove, + processor_queue_empty, + priority_is_urgent, + processor_csw_check, + processor_queue_has_priority, + sched_traditional_initial_quantum_size, + sched_traditional_initial_thread_sched_mode, + sched_traditional_supports_timeshare_mode, + can_update_priority, + update_priority, + lightweight_update_priority, + sched_traditional_quantum_expire, + should_current_thread_rechoose_processor, + sched_traditional_processor_runq_count, + sched_traditional_processor_runq_stats_count_sum, + sched_traditional_fairshare_init, + sched_traditional_fairshare_runq_count, + sched_traditional_fairshare_runq_stats_count_sum, + sched_traditional_fairshare_enqueue, + sched_traditional_fairshare_dequeue, + sched_traditional_fairshare_queue_remove, + TRUE /* direct_dispatch_to_idle_processors */ +}; + +const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = { + sched_traditional_with_pset_runqueue_init, + sched_traditional_timebase_init, + sched_traditional_processor_init, + sched_traditional_pset_init, + sched_traditional_tick_continue, + sched_traditional_choose_thread, + steal_thread, + compute_priority, + choose_processor, + processor_enqueue, + processor_queue_shutdown, + processor_queue_remove, + sched_traditional_with_pset_runqueue_processor_queue_empty, + priority_is_urgent, + processor_csw_check, + processor_queue_has_priority, + sched_traditional_initial_quantum_size, + sched_traditional_initial_thread_sched_mode, + sched_traditional_supports_timeshare_mode, + can_update_priority, + update_priority, + lightweight_update_priority, + sched_traditional_quantum_expire, + should_current_thread_rechoose_processor, + sched_traditional_processor_runq_count, + sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum, + sched_traditional_fairshare_init, + sched_traditional_fairshare_runq_count, + sched_traditional_fairshare_runq_stats_count_sum, + sched_traditional_fairshare_enqueue, + sched_traditional_fairshare_dequeue, + sched_traditional_fairshare_queue_remove, + FALSE /* direct_dispatch_to_idle_processors */ +}; + +#endif + +const struct sched_dispatch_table *sched_current_dispatch = NULL; + /* - * Waiting protocols and implementation: - * - * Each thread may be waiting for exactly one event; this event - * is set using assert_wait(). That thread may be awakened either - * by performing a thread_wakeup_prim() on its event, - * or by directly waking that thread up with clear_wait(). + * Statically allocate a buffer to hold the longest possible + * scheduler description string, as currently implemented. + * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/ + * to export to userspace via sysctl(3). If either version + * changes, update the other. * - * The implementation of wait events uses a hash table. 
Each - * bucket is queue of threads having the same hash function - * value; the chain for the queue (linked list) is the run queue - * field. [It is not possible to be waiting and runnable at the - * same time.] - * - * Locks on both the thread and on the hash buckets govern the - * wait event field and the queue chain field. Because wakeup - * operations only have the event as an argument, the event hash - * bucket must be locked before any thread. - * - * Scheduling operations may also occur at interrupt level; therefore, - * interrupts below splsched() must be prevented when holding - * thread or hash bucket locks. - * - * The wait event hash table declarations are as follows: + * Note that in addition to being an upper bound on the strings + * in the kernel, it's also an exact parameter to PE_get_default(), + * which interrogates the device tree on some platforms. That + * API requires the caller know the exact size of the device tree + * property, so we need both a legacy size (32) and the current size + * (48) to deal with old and new device trees. The device tree property + * is similarly padded to a fixed size so that the same kernel image + * can run on multiple devices with different schedulers configured + * in the device tree. */ +#define SCHED_STRING_MAX_LENGTH (48) -#define NUMQUEUES 59 - -struct wait_queue wait_queues[NUMQUEUES]; +char sched_string[SCHED_STRING_MAX_LENGTH]; +static enum sched_enum _sched_enum = sched_enum_unknown; -#define wait_hash(event) \ - ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES) +void +sched_init(void) +{ + char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' }; + + /* Check for runtime selection of the scheduler algorithm */ + if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) { + /* If no boot-args override, look in device tree */ + if (!PE_get_default("kern.sched", sched_arg, + SCHED_STRING_MAX_LENGTH)) { + sched_arg[0] = '\0'; + } + } -int8_t sched_load_shifts[NRQS]; + if (strlen(sched_arg) > 0) { + if (0) { + /* Allow pattern below */ +#if defined(CONFIG_SCHED_TRADITIONAL) + } else if (0 == strcmp(sched_arg, kSchedTraditionalString)) { + sched_current_dispatch = &sched_traditional_dispatch; + _sched_enum = sched_enum_traditional; + strlcpy(sched_string, kSchedTraditionalString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedTraditionalString); + } else if (0 == strcmp(sched_arg, kSchedTraditionalWithPsetRunqueueString)) { + sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch; + _sched_enum = sched_enum_traditional_with_pset_runqueue; + strlcpy(sched_string, kSchedTraditionalWithPsetRunqueueString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedTraditionalWithPsetRunqueueString); +#endif +#if defined(CONFIG_SCHED_PROTO) + } else if (0 == strcmp(sched_arg, kSchedProtoString)) { + sched_current_dispatch = &sched_proto_dispatch; + _sched_enum = sched_enum_proto; + strlcpy(sched_string, kSchedProtoString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedProtoString); +#endif +#if defined(CONFIG_SCHED_GRRR) + } else if (0 == strcmp(sched_arg, kSchedGRRRString)) { + sched_current_dispatch = &sched_grrr_dispatch; + _sched_enum = sched_enum_grrr; + strlcpy(sched_string, kSchedGRRRString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedGRRRString); +#endif +#if defined(CONFIG_SCHED_FIXEDPRIORITY) + } else if (0 == strcmp(sched_arg, kSchedFixedPriorityString)) { + 
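+			/*
+			 * Annotation (not part of the original change): every
+			 * branch in this chain follows the same pattern: install
+			 * the scheduler's dispatch table, record its enum value,
+			 * cache its name for the sysctl export described above,
+			 * and log the selection.  Booting with a boot-arg such as
+			 * sched=fixedpriority (assuming that string is the value
+			 * of kSchedFixedPriorityString) lands in this branch.
+			 */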
sched_current_dispatch = &sched_fixedpriority_dispatch; + _sched_enum = sched_enum_fixedpriority; + strlcpy(sched_string, kSchedFixedPriorityString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedFixedPriorityString); + } else if (0 == strcmp(sched_arg, kSchedFixedPriorityWithPsetRunqueueString)) { + sched_current_dispatch = &sched_fixedpriority_with_pset_runqueue_dispatch; + _sched_enum = sched_enum_fixedpriority_with_pset_runqueue; + strlcpy(sched_string, kSchedFixedPriorityWithPsetRunqueueString, sizeof(sched_string)); + kprintf("Scheduler: Runtime selection of %s\n", kSchedFixedPriorityWithPsetRunqueueString); +#endif + } else { + panic("Unrecognized scheduler algorithm: %s", sched_arg); + } + } else { +#if defined(CONFIG_SCHED_TRADITIONAL) + sched_current_dispatch = &sched_traditional_dispatch; + _sched_enum = sched_enum_traditional; + strlcpy(sched_string, kSchedTraditionalString, sizeof(sched_string)); + kprintf("Scheduler: Default of %s\n", kSchedTraditionalString); +#elif defined(CONFIG_SCHED_PROTO) + sched_current_dispatch = &sched_proto_dispatch; + _sched_enum = sched_enum_proto; + strlcpy(sched_string, kSchedProtoString, sizeof(sched_string)); + kprintf("Scheduler: Default of %s\n", kSchedProtoString); +#elif defined(CONFIG_SCHED_GRRR) + sched_current_dispatch = &sched_grrr_dispatch; + _sched_enum = sched_enum_grrr; + strlcpy(sched_string, kSchedGRRRString, sizeof(sched_string)); + kprintf("Scheduler: Default of %s\n", kSchedGRRRString); +#elif defined(CONFIG_SCHED_FIXEDPRIORITY) + sched_current_dispatch = &sched_fixedpriority_dispatch; + _sched_enum = sched_enum_fixedpriority; + strlcpy(sched_string, kSchedFixedPriorityString, sizeof(sched_string)); + kprintf("Scheduler: Default of %s\n", kSchedFixedPriorityString); +#else +#error No default scheduler implementation +#endif + } + + SCHED(init)(); + SCHED(fairshare_init)(); + sched_realtime_init(); + ast_init(); + + SCHED(pset_init)(&pset0); + SCHED(processor_init)(master_processor); +} void -sched_init(void) +sched_timebase_init(void) +{ + uint64_t abstime; + + clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime); + sched_one_second_interval = abstime; + + SCHED(timebase_init)(); + sched_realtime_timebase_init(); +} + +#if defined(CONFIG_SCHED_TRADITIONAL) + +static void +sched_traditional_init(void) { /* * Calculate the timeslicing quantum @@ -257,19 +594,13 @@ sched_init(void) printf("standard timeslicing quantum is %d us\n", std_quantum_us); - sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) * - (1 << SCHED_TICK_SHIFT); - - wait_queues_init(); load_shift_init(); - simple_lock_init(&rt_lock, 0); - run_queue_init(&rt_runq); + preempt_pri_init(); sched_tick = 0; - ast_init(); } -void -sched_timebase_init(void) +static void +sched_traditional_timebase_init(void) { uint64_t abstime; uint32_t shift; @@ -278,29 +609,18 @@ sched_timebase_init(void) clock_interval_to_absolutetime_interval( std_quantum_us, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - std_quantum = abstime; + std_quantum = (uint32_t)abstime; /* smallest remaining quantum (250 us) */ clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - min_std_quantum = abstime; - - /* smallest rt computaton (50 us) */ - clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime); - assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); - min_rt_quantum = abstime; - - /* maximum rt computation 
(50 ms) */
-	clock_interval_to_absolutetime_interval(
-		50, 1000*NSEC_PER_USEC, &abstime);
-	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
-	max_rt_quantum = abstime;
+	min_std_quantum = (uint32_t)abstime;
 
 	/* scheduler tick interval */
 	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
 													NSEC_PER_USEC, &abstime);
 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
-	sched_tick_interval = abstime;
+	sched_tick_interval = (uint32_t)abstime;
 
 	/*
 	 * Compute conversion factor from usage to
 	 *
@@ -312,19 +632,82 @@
 	sched_fixed_shift = shift;
 
 	max_unsafe_computation = max_unsafe_quanta * std_quantum;
+	sched_safe_duration = 2 * max_unsafe_quanta * std_quantum;
+
 	max_poll_computation = max_poll_quanta * std_quantum;
+
+	thread_depress_time = 1 * std_quantum;
+	default_timeshare_computation = std_quantum / 2;
+	default_timeshare_constraint = std_quantum;
+
 }
 
-void
-wait_queues_init(void)
+static void
+sched_traditional_processor_init(processor_t processor)
 {
-	register int i;
+	if (!sched_traditional_use_pset_runqueue) {
+		run_queue_init(&processor->runq);
+	}
+	processor->runq_bound_count = 0;
+}
 
-	for (i = 0; i < NUMQUEUES; i++) {
-		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
+static void
+sched_traditional_pset_init(processor_set_t pset)
+{
+	if (sched_traditional_use_pset_runqueue) {
+		run_queue_init(&pset->pset_runq);
 	}
+	pset->pset_runq_bound_count = 0;
+}
+
+static void
+sched_traditional_with_pset_runqueue_init(void)
+{
+	sched_traditional_init();
+	sched_traditional_use_pset_runqueue = TRUE;
+}
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
+void
+sched_traditional_fairshare_init(void)
+{
+	simple_lock_init(&fs_lock, 0);
+
+	fs_runq.count = 0;
+	queue_init(&fs_runq.queue);
+}
+#endif
+
+static void
+sched_realtime_init(void)
+{
+	simple_lock_init(&rt_lock, 0);
+
+	rt_runq.count = 0;
+	queue_init(&rt_runq.queue);
+}
+
+static void
+sched_realtime_timebase_init(void)
+{
+	uint64_t abstime;
+
+	/* smallest rt computation (50 us) */
+	clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
+	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+	min_rt_quantum = (uint32_t)abstime;
+
+	/* maximum rt computation (50 ms) */
+	clock_interval_to_absolutetime_interval(
+		50, 1000*NSEC_PER_USEC, &abstime);
+	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+	max_rt_quantum = (uint32_t)abstime;
+
+}
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
 /*
  * Set up values for timeshare
  * loading factors.
@@ -343,6 +726,20 @@ load_shift_init(void)
 	}
 }
 
+static void
+preempt_pri_init(void)
+{
+	int		i, *p = sched_preempt_pri;
+
+	for (i = BASEPRI_FOREGROUND + 1; i < MINPRI_KERNEL; ++i)
+		setbit(i, p);
+
+	for (i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
+		setbit(i, p);
+}
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
 /*
  * Thread wait timer expiration.
  */
@@ -366,6 +763,8 @@ thread_timer_expire(
 	splx(s);
 }
 
+#ifndef	__LP64__
+
 /*
  * thread_set_timer:
  *
@@ -386,7 +785,7 @@ thread_set_timer(
 	thread_lock(thread);
 	if ((thread->state & TH_WAIT) != 0) {
 		clock_interval_to_deadline(interval, scale_factor, &deadline);
-		if (!timer_call_enter(&thread->wait_timer, deadline))
+		if (!timer_call_enter(&thread->wait_timer, deadline, thread->sched_pri >= BASEPRI_RTQUEUES ? 
TIMER_CALL_CRITICAL : 0)) thread->wait_timer_active++; thread->wait_timer_is_set = TRUE; } @@ -404,7 +803,7 @@ thread_set_timer_deadline( s = splsched(); thread_lock(thread); if ((thread->state & TH_WAIT) != 0) { - if (!timer_call_enter(&thread->wait_timer, deadline)) + if (!timer_call_enter(&thread->wait_timer, deadline, thread->sched_pri >= BASEPRI_RTQUEUES ? TIMER_CALL_CRITICAL : 0)) thread->wait_timer_active++; thread->wait_timer_is_set = TRUE; } @@ -429,6 +828,8 @@ thread_cancel_timer(void) splx(s); } +#endif /* __LP64__ */ + /* * thread_unblock: * @@ -474,19 +875,23 @@ thread_unblock( * Update run counts. */ sched_run_incr(); - if (thread->sched_mode & TH_MODE_TIMESHARE) + if (thread->sched_mode == TH_MODE_TIMESHARE) sched_share_incr(); } else { /* * Signal if idling on another processor. */ +#if CONFIG_SCHED_IDLE_IN_PLACE if (thread->state & TH_IDLE) { processor_t processor = thread->last_processor; if (processor != current_processor()) machine_signal_idle(processor); } +#else + assert((thread->state & TH_IDLE) == 0); +#endif result = TRUE; } @@ -494,7 +899,7 @@ thread_unblock( /* * Calculate deadline for real-time threads. */ - if (thread->sched_mode & TH_MODE_REALTIME) { + if (thread->sched_mode == TH_MODE_REALTIME) { thread->realtime.deadline = mach_absolute_time(); thread->realtime.deadline += thread->realtime.constraint; } @@ -508,7 +913,9 @@ thread_unblock( KERNEL_DEBUG_CONSTANT( MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE, - (int)thread, (int)thread->sched_pri, 0, 0, 0); + (uintptr_t)thread_tid(thread), thread->sched_pri, 0, 0, 0); + + DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info); return (result); } @@ -560,6 +967,8 @@ thread_mark_wait_locked( { boolean_t at_safe_point; + assert(thread == current_thread()); + /* * The thread may have certain types of interrupts/aborts masked * off. Even if the wait location says these types of interrupts @@ -572,16 +981,19 @@ thread_mark_wait_locked( at_safe_point = (interruptible == THREAD_ABORTSAFE); if ( interruptible == THREAD_UNINT || - !(thread->sched_mode & TH_MODE_ABORT) || + !(thread->sched_flags & TH_SFLAG_ABORT) || (!at_safe_point && - (thread->sched_mode & TH_MODE_ABORTSAFELY))) { + (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) { + + DTRACE_SCHED(sleep); + thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT); thread->at_safe_point = at_safe_point; return (thread->wait_result = THREAD_WAITING); } else - if (thread->sched_mode & TH_MODE_ABORTSAFELY) - thread->sched_mode &= ~TH_MODE_ISABORTED; + if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) + thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK; return (thread->wait_result = THREAD_INTERRUPTED); } @@ -676,7 +1088,7 @@ assert_wait_timeout( thread_lock(thread); clock_interval_to_deadline(interval, scale_factor, &deadline); - wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event, + wresult = wait_queue_assert_wait64_locked(wqueue, CAST_DOWN(event64_t, event), interruptible, deadline, thread); thread_unlock(thread); @@ -704,7 +1116,7 @@ assert_wait_deadline( wait_queue_lock(wqueue); thread_lock(thread); - wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event, + wresult = wait_queue_assert_wait64_locked(wqueue, CAST_DOWN(event64_t,event), interruptible, deadline, thread); thread_unlock(thread); @@ -769,57 +1181,6 @@ thread_sleep_usimple_lock( return res; } -/* - * thread_sleep_mutex: - * - * Cause the current thread to wait until the specified event - * occurs. 
The specified mutex is unlocked before releasing - * the cpu. The mutex will be re-acquired before returning. - * - * JMM - Add hint to make sure mutex is available before rousting - */ -wait_result_t -thread_sleep_mutex( - event_t event, - mutex_t *mutex, - wait_interrupt_t interruptible) -{ - wait_result_t res; - - res = assert_wait(event, interruptible); - if (res == THREAD_WAITING) { - mutex_unlock(mutex); - res = thread_block(THREAD_CONTINUE_NULL); - mutex_lock(mutex); - } - return res; -} - -/* - * thread_sleep_mutex_deadline: - * - * Cause the current thread to wait until the specified event - * (or deadline) occurs. The specified mutex is unlocked before - * releasing the cpu. The mutex will be re-acquired before returning. - */ -wait_result_t -thread_sleep_mutex_deadline( - event_t event, - mutex_t *mutex, - uint64_t deadline, - wait_interrupt_t interruptible) -{ - wait_result_t res; - - res = assert_wait_deadline(event, interruptible, deadline); - if (res == THREAD_WAITING) { - mutex_unlock(mutex); - res = thread_block(THREAD_CONTINUE_NULL); - mutex_lock(mutex); - } - return res; -} - /* * thread_sleep_lock_write: * @@ -1028,7 +1389,7 @@ clear_wait_internal( wait_result_t wresult) { wait_queue_t wq = thread->wait_queue; - int i = LockTimeOut; + uint32_t i = LockTimeOut; do { if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) @@ -1052,7 +1413,7 @@ clear_wait_internal( } return (thread_go(thread, wresult)); - } while (--i > 0); + } while ((--i > 0) || machine_timeout_suspended()); panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n", thread, wq, cpu_number()); @@ -1099,7 +1460,18 @@ kern_return_t thread_wakeup_prim( event_t event, boolean_t one_thread, - wait_result_t result) + wait_result_t result) +{ + return (thread_wakeup_prim_internal(event, one_thread, result, -1)); +} + + +kern_return_t +thread_wakeup_prim_internal( + event_t event, + boolean_t one_thread, + wait_result_t result, + int priority) { register wait_queue_t wq; register int index; @@ -1107,9 +1479,9 @@ thread_wakeup_prim( index = wait_hash(event); wq = &wait_queues[index]; if (one_thread) - return (wait_queue_wakeup_one(wq, event, result)); + return (wait_queue_wakeup_one(wq, event, result, priority)); else - return (wait_queue_wakeup_all(wq, event, result)); + return (wait_queue_wakeup_all(wq, event, result)); } /* @@ -1155,26 +1527,29 @@ thread_select( processor_t processor) { processor_set_t pset = processor->processor_set; - thread_t new_thread; - boolean_t other_runnable; + thread_t new_thread = THREAD_NULL; + boolean_t inactive_state; + + assert(processor == current_processor()); do { /* * Update the priority. */ - if (thread->sched_stamp != sched_tick) - update_priority(thread); - + if (SCHED(can_update_priority)(thread)) + SCHED(update_priority)(thread); + processor->current_pri = thread->sched_pri; + processor->current_thmode = thread->sched_mode; pset_lock(pset); - simple_lock(&rt_lock); + assert(pset->low_count); + assert(pset->low_pri); - /* - * Check for other runnable threads. - */ - other_runnable = processor->runq.count > 0 || rt_runq.count > 0; + inactive_state = processor->state != PROCESSOR_SHUTDOWN && machine_processor_is_inactive(processor); + + simple_lock(&rt_lock); /* * Test to see if the current thread should continue @@ -1182,31 +1557,26 @@ thread_select( * bound to a different processor, nor be in the wrong * processor set. 
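 	 *
 	 * Annotation (not in the original source): the test below
 	 * distills to roughly
 	 *
 	 *	((thread->state & ~TH_SUSP) == TH_RUN) &&
 	 *	    bound_ok(thread, processor) && affinity_ok(thread, pset)
 	 *
 	 * where bound_ok/affinity_ok are shorthand here, not real
 	 * helpers; only a thread passing all three checks may keep this
 	 * processor without a full run-queue search.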
*/ - if ( thread->state == TH_RUN && + if ( ((thread->state & ~TH_SUSP) == TH_RUN) && + (thread->sched_pri >= BASEPRI_RTQUEUES || + processor->processor_meta == PROCESSOR_META_NULL || + processor->processor_meta->primary == processor) && (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor) && (thread->affinity_set == AFFINITY_SET_NULL || thread->affinity_set->aset_pset == pset) ) { if ( thread->sched_pri >= BASEPRI_RTQUEUES && first_timeslice(processor) ) { - if (rt_runq.highq >= BASEPRI_RTQUEUES) { - register run_queue_t runq = &rt_runq; + if (rt_runq.count > 0) { register queue_t q; - q = runq->queues + runq->highq; + q = &rt_runq.queue; if (((thread_t)q->next)->realtime.deadline < processor->deadline) { - thread = (thread_t)q->next; - ((queue_entry_t)thread)->next->prev = q; - q->next = ((queue_entry_t)thread)->next; + thread = (thread_t)dequeue_head(q); thread->runq = PROCESSOR_NULL; - assert(thread->sched_mode & TH_MODE_PREEMPT); - runq->count--; runq->urgency--; - if (queue_empty(q)) { - if (runq->highq != IDLEPRI) - clrbit(MAXPRI - runq->highq, runq->bitmap); - runq->highq = MAXPRI - ffsbit(runq->bitmap); - } + SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count); + rt_runq.count--; } } @@ -1219,16 +1589,16 @@ thread_select( return (thread); } - if ( (!other_runnable || - (processor->runq.highq < thread->sched_pri && - rt_runq.highq < thread->sched_pri)) ) { + if (!inactive_state && (thread->sched_mode != TH_MODE_FAIRSHARE || SCHED(fairshare_runq_count)() == 0) && (rt_runq.count == 0 || BASEPRI_RTQUEUES < thread->sched_pri) && + (new_thread = SCHED(choose_thread)(processor, thread->sched_mode == TH_MODE_FAIRSHARE ? MINPRI : thread->sched_pri)) == THREAD_NULL) { simple_unlock(&rt_lock); /* I am the highest priority runnable (non-idle) thread */ - pset_hint_low(pset, processor); - pset_hint_high(pset, processor); + pset_pri_hint(pset, processor, processor->current_pri); + + pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor)); processor->deadline = UINT64_MAX; @@ -1238,45 +1608,116 @@ thread_select( } } - if (other_runnable) - return choose_thread(processor); + if (new_thread != THREAD_NULL || + (SCHED(processor_queue_has_priority)(processor, rt_runq.count == 0 ? IDLEPRI : BASEPRI_RTQUEUES, TRUE) && + (new_thread = SCHED(choose_thread)(processor, MINPRI)) != THREAD_NULL)) { + simple_unlock(&rt_lock); - simple_unlock(&rt_lock); + if (!inactive_state) { + pset_pri_hint(pset, processor, new_thread->sched_pri); - /* - * No runnable threads, attempt to steal - * from other processors. - */ - if (pset->high_hint != PROCESSOR_NULL && pset->high_hint->runq.count > 0) { - new_thread = steal_thread(pset->high_hint); - if (new_thread != THREAD_NULL) { + pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor)); + } + + processor->deadline = UINT64_MAX; pset_unlock(pset); return (new_thread); - } } - /* - * Nothing is runnable, so set this processor idle if it - * was running. + if (rt_runq.count > 0) { + thread = (thread_t)dequeue_head(&rt_runq.queue); + + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count); + rt_runq.count--; + + simple_unlock(&rt_lock); + + processor->deadline = thread->realtime.deadline; + pset_unlock(pset); + + return (thread); + } + + simple_unlock(&rt_lock); + + /* No realtime threads and no normal threads on the per-processor + * runqueue. Finally check for global fairshare threads. 
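+	 *
+	 * Annotation (not in the original source): the fairshare queue
+	 * is a single global FIFO shared by every processor, and SCHED()
+	 * is the dispatch-table indirection, expected to expand roughly as
+	 *
+	 *	#define SCHED(f)	(sched_current_dispatch->f)
+	 *
+	 * so SCHED(fairshare_dequeue)() invokes whichever implementation
+	 * sched_init() installed at boot.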
+ */ + if ((new_thread = SCHED(fairshare_dequeue)()) != THREAD_NULL) { + + processor->deadline = UINT64_MAX; + pset_unlock(pset); + + return (new_thread); + } + + processor->deadline = UINT64_MAX; + + /* + * Set processor inactive based on + * indication from the platform code. + */ + if (inactive_state) { + if (processor->state == PROCESSOR_RUNNING) + remqueue((queue_entry_t)processor); + else + if (processor->state == PROCESSOR_IDLE) + remqueue((queue_entry_t)processor); + + processor->state = PROCESSOR_INACTIVE; + + pset_unlock(pset); + + return (processor->idle_thread); + } + + /* + * No runnable threads, attempt to steal + * from other processors. + */ + new_thread = SCHED(steal_thread)(pset); + if (new_thread != THREAD_NULL) { + return (new_thread); + } + + /* + * If other threads have appeared, shortcut + * around again. + */ + if (!SCHED(processor_queue_empty)(processor) || rt_runq.count > 0 || SCHED(fairshare_runq_count)() > 0) + continue; + + pset_lock(pset); + + /* + * Nothing is runnable, so set this processor idle if it + * was running. */ if (processor->state == PROCESSOR_RUNNING) { - remqueue(&pset->active_queue, (queue_entry_t)processor); + remqueue((queue_entry_t)processor); processor->state = PROCESSOR_IDLE; - enqueue_head(&pset->idle_queue, (queue_entry_t)processor); - pset->low_hint = processor; - pset->idle_count++; + if (processor->processor_meta == PROCESSOR_META_NULL || processor->processor_meta->primary == processor) { + enqueue_head(&pset->idle_queue, (queue_entry_t)processor); + pset_pri_init_hint(pset, processor); + pset_count_init_hint(pset, processor); + } + else { + enqueue_head(&processor->processor_meta->idle_queue, (queue_entry_t)processor); + pset_unlock(pset); + return (processor->idle_thread); + } } - processor->deadline = UINT64_MAX; - pset_unlock(pset); +#if CONFIG_SCHED_IDLE_IN_PLACE /* * Choose idle thread if fast idle is not possible. */ - if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active) + if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES) return (processor->idle_thread); /* @@ -1286,11 +1727,23 @@ thread_select( */ new_thread = thread_select_idle(thread, processor); +#else /* !CONFIG_SCHED_IDLE_IN_PLACE */ + + /* + * Do a full context switch to idle so that the current + * thread can start running on another processor without + * waiting for the fast-idled processor to wake up. + */ + return (processor->idle_thread); + +#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */ + } while (new_thread == THREAD_NULL); return (new_thread); } +#if CONFIG_SCHED_IDLE_IN_PLACE /* * thread_select_idle: * @@ -1305,12 +1758,13 @@ thread_select_idle( { thread_t new_thread; - if (thread->sched_mode & TH_MODE_TIMESHARE) + if (thread->sched_mode == TH_MODE_TIMESHARE) sched_share_decr(); sched_run_decr(); thread->state |= TH_IDLE; processor->current_pri = IDLEPRI; + processor->current_thmode = TH_MODE_NONE; thread_unlock(thread); @@ -1318,6 +1772,7 @@ thread_select_idle( * Switch execution timing to processor idle thread. 
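 	 *
 	 * Annotation (not in the original source): from here on, time is
 	 * charged to the idle thread's system_timer, so usage accounting
 	 * looks exactly as if a real context switch to the idle thread
 	 * had occurred, even though we idle in place on the blocked
 	 * thread's stack.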
*/ processor->last_dispatch = mach_absolute_time(); + thread->last_run_time = processor->last_dispatch; thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer); PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer; @@ -1329,16 +1784,38 @@ thread_select_idle( (*thread->sched_call)(SCHED_CALL_BLOCK, thread); + thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0); + /* * Enable interrupts and perform idling activities. No * preemption due to TH_IDLE being set. */ spllo(); new_thread = processor_idle(thread, processor); + /* + * Return at splsched. + */ (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread); thread_lock(thread); + /* + * If we idled in place, simulate a context switch back + * to the original priority of the thread so that the + * platform layer cannot distinguish this from a true + * switch to the idle thread. + */ + if (thread->sched_mode == TH_MODE_REALTIME) + thread_tell_urgency(THREAD_URGENCY_REAL_TIME, thread->realtime.period, thread->realtime.deadline); + /* Identify non-promoted threads which have requested a + * "background" priority. + */ + else if ((thread->sched_pri <= MAXPRI_THROTTLE) && + (thread->priority <= MAXPRI_THROTTLE)) + thread_tell_urgency(THREAD_URGENCY_BACKGROUND, thread->sched_pri, thread->priority); + else + thread_tell_urgency(THREAD_URGENCY_NORMAL, thread->sched_pri, thread->priority); + /* * If awakened, switch to thread timer and start a new quantum. * Otherwise skip; we will context switch to another thread or return here. @@ -1349,9 +1826,10 @@ thread_select_idle( PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; thread_quantum_init(thread); + thread->last_quantum_refill_time = processor->last_dispatch; processor->quantum_end = processor->last_dispatch + thread->current_quantum; - timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end); + timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end, TIMER_CALL_CRITICAL); processor->timeslice = 1; thread->computation_epoch = processor->last_dispatch; @@ -1360,11 +1838,86 @@ thread_select_idle( thread->state &= ~TH_IDLE; sched_run_incr(); - if (thread->sched_mode & TH_MODE_TIMESHARE) + if (thread->sched_mode == TH_MODE_TIMESHARE) sched_share_incr(); return (new_thread); } +#endif /* CONFIG_SCHED_IDLE_IN_PLACE */ + +#if defined(CONFIG_SCHED_TRADITIONAL) +static thread_t +sched_traditional_choose_thread( + processor_t processor, + int priority) +{ + thread_t thread; + + thread = choose_thread(processor, runq_for_processor(processor), priority); + if (thread != THREAD_NULL) { + runq_consider_decr_bound_count(processor, thread); + } + + return thread; +} + +#endif /* defined(CONFIG_SCHED_TRADITIONAL) */ + +#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_FIXEDPRIORITY) + +/* + * choose_thread: + * + * Locate a thread to execute from the processor run queue + * and return it. Only choose a thread with greater or equal + * priority. + * + * Associated pset must be locked. Returns THREAD_NULL + * on failure. 
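+ *
+ * Annotation (not in the original source): the scan starts at
+ * rq->highq and walks priority buckets downward, skipping threads
+ * bound to other processors; the rq->bitmap / ffsbit() pair keeps
+ * locating the next non-empty level cheap, so the common case, an
+ * unbound thread at the head of the top bucket, is a single dequeue.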
+ */ +thread_t +choose_thread( + processor_t processor, + run_queue_t rq, + int priority) +{ + queue_t queue = rq->queues + rq->highq; + int pri = rq->highq, count = rq->count; + thread_t thread; + + while (count > 0 && pri >= priority) { + thread = (thread_t)queue_first(queue); + while (!queue_end(queue, (queue_entry_t)thread)) { + if (thread->bound_processor == PROCESSOR_NULL || + thread->bound_processor == processor) { + remqueue((queue_entry_t)thread); + + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); + rq->count--; + if (SCHED(priority_is_urgent)(pri)) { + rq->urgency--; assert(rq->urgency >= 0); + } + if (queue_empty(queue)) { + if (pri != IDLEPRI) + clrbit(MAXPRI - pri, rq->bitmap); + rq->highq = MAXPRI - ffsbit(rq->bitmap); + } + + return (thread); + } + count--; + + thread = (thread_t)queue_next((queue_entry_t)thread); + } + + queue--; pri--; + } + + return (THREAD_NULL); +} + +#endif /* defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_FIXEDPRIORITY) */ /* * Perform a context switch and start executing the new thread. @@ -1410,9 +1963,12 @@ thread_invoke( void *parameter = self->parameter; processor_t processor; - if (get_preemption_level() != 0) - panic("thread_invoke: preemption_level %d\n", - get_preemption_level()); + if (get_preemption_level() != 0) { + int pl = get_preemption_level(); + panic("thread_invoke: preemption_level %d, possible cause: %s", + pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" : + "blocking while holding a spinlock, or within interrupt context")); + } assert(self == current_thread()); @@ -1430,7 +1986,7 @@ thread_invoke( * Allow time constraint threads to hang onto * a stack. */ - if ((self->sched_mode & TH_MODE_REALTIME) && !self->reserved_stack) + if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) self->reserved_stack = self->kernel_stack; if (continuation != NULL) { @@ -1452,6 +2008,7 @@ thread_invoke( processor = current_processor(); processor->active_thread = thread; processor->current_pri = thread->sched_pri; + processor->current_thmode = thread->sched_mode; if (thread->last_processor != processor && thread->last_processor != NULL) { if (thread->last_processor->processor_set != processor->processor_set) thread->ps_switch++; @@ -1465,14 +2022,26 @@ thread_invoke( self->reason = reason; processor->last_dispatch = mach_absolute_time(); + self->last_run_time = processor->last_dispatch; thread_timer_event(processor->last_dispatch, &thread->system_timer); PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE, - self->reason, (int)thread, self->sched_pri, thread->sched_pri, 0); + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + + if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0); + } -TLOG(1, "thread_invoke: calling machine_stack_handoff\n"); - machine_stack_handoff(self, thread); + DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info); + + SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri); + + TLOG(1, "thread_invoke: calling stack_handoff\n"); + stack_handoff(self, thread); + + DTRACE_SCHED(on__cpu); thread_dispatch(self, thread); @@ -1493,6 +2062,9 @@ TLOG(1, "thread_invoke: calling 
machine_stack_handoff\n"); counter(++c_thread_invoke_same); thread_unlock(self); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + self->continuation = self->parameter = NULL; funnel_refunnel_check(self, 3); @@ -1519,6 +2091,10 @@ need_stack: ast_context(self); counter(++c_thread_invoke_same); thread_unlock(self); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + return (TRUE); } } @@ -1529,6 +2105,7 @@ need_stack: processor = current_processor(); processor->active_thread = thread; processor->current_pri = thread->sched_pri; + processor->current_thmode = thread->sched_mode; if (thread->last_processor != processor && thread->last_processor != NULL) { if (thread->last_processor->processor_set != processor->processor_set) thread->ps_switch++; @@ -1545,11 +2122,21 @@ need_stack: self->reason = reason; processor->last_dispatch = mach_absolute_time(); + self->last_run_time = processor->last_dispatch; thread_timer_event(processor->last_dispatch, &thread->system_timer); PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, - (int)self->reason, (int)thread, self->sched_pri, thread->sched_pri, 0); + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + + if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0); + } + + DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info); + + SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri); /* * This is where we actually switch register context, @@ -1557,7 +2144,9 @@ need_stack: * as a result of a subsequent context switch. */ thread = machine_switch_context(self, continuation, thread); -TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread); + TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread); + + DTRACE_SCHED(on__cpu); /* * We have been resumed and are set to run. @@ -1611,11 +2200,11 @@ thread_dispatch( */ if ( first_timeslice(processor) && processor->quantum_end > processor->last_dispatch ) - thread->current_quantum = (processor->quantum_end - processor->last_dispatch); + thread->current_quantum = (uint32_t)(processor->quantum_end - processor->last_dispatch); else thread->current_quantum = 0; - if (thread->sched_mode & TH_MODE_REALTIME) { + if (thread->sched_mode == TH_MODE_REALTIME) { /* * Cancel the deadline if the thread has * consumed the entire quantum. 
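/*
 * Illustration (not part of the diff, assumed numbers): a realtime
 * thread with computation C = 5 ms receives current_quantum = 5 ms
 * at dispatch.  If it blocks 2 ms in, the remaining 3 ms and its
 * deadline survive for the next dispatch; if it runs the full 5 ms,
 * the remainder is 0, so the deadline is cancelled (UINT64_MAX) and
 * AST_QUANTUM is recorded in thread->reason.
 */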
@@ -1624,8 +2213,8 @@ thread_dispatch( thread->realtime.deadline = UINT64_MAX; thread->reason |= AST_QUANTUM; } - } - else { + } else { +#if defined(CONFIG_SCHED_TRADITIONAL) /* * For non-realtime threads treat a tiny * remaining quantum as an expired quantum @@ -1635,6 +2224,7 @@ thread_dispatch( thread->reason |= AST_QUANTUM; thread->current_quantum += std_quantum; } +#endif } /* @@ -1647,9 +2237,7 @@ thread_dispatch( thread->current_quantum = 0; } - thread->last_switch = processor->last_dispatch; - - thread->computation_metered += (thread->last_switch - thread->computation_epoch); + thread->computation_metered += (processor->last_dispatch - thread->computation_epoch); if (!(thread->state & TH_WAIT)) { /* @@ -1672,12 +2260,25 @@ thread_dispatch( /* * Waiting. */ + boolean_t should_terminate = FALSE; + + /* Only the first call to thread_dispatch + * after explicit termination should add + * the thread to the termination queue + */ + if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) { + should_terminate = TRUE; + thread->state |= TH_TERMINATE2; + } + thread->state &= ~TH_RUN; - if (thread->sched_mode & TH_MODE_TIMESHARE) + if (thread->sched_mode == TH_MODE_TIMESHARE) sched_share_decr(); sched_run_decr(); + (*thread->sched_call)(SCHED_CALL_BLOCK, thread); + if (thread->wake_active) { thread->wake_active = FALSE; thread_unlock(thread); @@ -1689,39 +2290,55 @@ thread_dispatch( wake_unlock(thread); - (*thread->sched_call)(SCHED_CALL_BLOCK, thread); - - if (thread->state & TH_TERMINATE) + if (should_terminate) thread_terminate_enqueue(thread); } } } if (!(self->state & TH_IDLE)) { + + if (self->sched_mode == TH_MODE_REALTIME) + thread_tell_urgency(THREAD_URGENCY_REAL_TIME, self->realtime.period, self->realtime.deadline); + /* Identify non-promoted threads which have requested a + * "background" priority. + */ + else if ((self->sched_pri <= MAXPRI_THROTTLE) && + (self->priority <= MAXPRI_THROTTLE)) + thread_tell_urgency(THREAD_URGENCY_BACKGROUND, self->sched_pri, self->priority); + else + thread_tell_urgency(THREAD_URGENCY_NORMAL, self->sched_pri, self->priority); /* * Get a new quantum if none remaining. */ - if (self->current_quantum == 0) + if (self->current_quantum == 0) { thread_quantum_init(self); + self->last_quantum_refill_time = processor->last_dispatch; + } /* * Set up quantum timer and timeslice. 
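 	 *
 	 * Annotation (not in the original source): with the default
 	 * preemption rate of 100/s, a fresh timeshare quantum works out
 	 * to 10 ms, so a thread dispatched at time T gets
 	 * quantum_end = T + 10 ms, and the critical timer armed below
 	 * fires then to drive preemption.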
*/ processor->quantum_end = (processor->last_dispatch + self->current_quantum); - timer_call_enter1(&processor->quantum_timer, self, processor->quantum_end); + timer_call_enter1(&processor->quantum_timer, self, processor->quantum_end, TIMER_CALL_CRITICAL); processor->timeslice = 1; - self->last_switch = processor->last_dispatch; - - self->computation_epoch = self->last_switch; + self->computation_epoch = processor->last_dispatch; } else { timer_call_cancel(&processor->quantum_timer); processor->timeslice = 0; + + thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0); } } +#include + +uint32_t kdebug_thread_block = 0; + + /* * thread_block_reason: * @@ -1765,6 +2382,15 @@ thread_block_reason( self->continuation = continuation; self->parameter = parameter; + if (__improbable(kdebug_thread_block && kdebug_enable && self->state != TH_RUN)) { + uint32_t bt[8]; + + OSBacktrace((void **)&bt[0], 8); + + KERNEL_DEBUG_CONSTANT(0x140004c | DBG_FUNC_START, bt[0], bt[1], bt[2], bt[3], 0); + KERNEL_DEBUG_CONSTANT(0x140004c | DBG_FUNC_END, bt[4], bt[5], bt[6], bt[7], 0); + } + do { thread_lock(self); new_thread = thread_select(self, processor); @@ -1848,7 +2474,9 @@ thread_continue( register thread_t self = current_thread(); register thread_continue_t continuation; register void *parameter; - + + DTRACE_SCHED(on__cpu); + continuation = self->continuation; parameter = self->parameter; @@ -1866,6 +2494,40 @@ thread_continue( /*NOTREACHED*/ } +void +thread_quantum_init(thread_t thread) +{ + if (thread->sched_mode == TH_MODE_REALTIME) { + thread->current_quantum = thread->realtime.computation; + } else { + thread->current_quantum = SCHED(initial_quantum_size)(thread); + } +} + +#if defined(CONFIG_SCHED_TRADITIONAL) +static uint32_t +sched_traditional_initial_quantum_size(thread_t thread __unused) +{ + return std_quantum; +} + +static sched_mode_t +sched_traditional_initial_thread_sched_mode(task_t parent_task) +{ + if (parent_task == kernel_task) + return TH_MODE_FIXED; + else + return TH_MODE_TIMESHARE; +} + +static boolean_t +sched_traditional_supports_timeshare_mode(void) +{ + return TRUE; +} + +#endif /* CONFIG_SCHED_TRADITIONAL */ + /* * run_queue_init: * @@ -1886,16 +2548,97 @@ run_queue_init( queue_init(&rq->queues[i]); } +#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) +int +sched_traditional_fairshare_runq_count(void) +{ + return fs_runq.count; +} + +uint64_t +sched_traditional_fairshare_runq_stats_count_sum(void) +{ + return fs_runq.runq_stats.count_sum; +} + +void +sched_traditional_fairshare_enqueue(thread_t thread) +{ + queue_t queue = &fs_runq.queue; + + simple_lock(&fs_lock); + + enqueue_tail(queue, (queue_entry_t)thread); + + thread->runq = FS_RUNQ; + SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, fs_runq.count); + fs_runq.count++; + + simple_unlock(&fs_lock); +} + +thread_t +sched_traditional_fairshare_dequeue(void) +{ + thread_t thread; + + simple_lock(&fs_lock); + if (fs_runq.count > 0) { + thread = (thread_t)dequeue_head(&fs_runq.queue); + + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, fs_runq.count); + fs_runq.count--; + + simple_unlock(&fs_lock); + + return (thread); + } + simple_unlock(&fs_lock); + + return THREAD_NULL; +} + +boolean_t +sched_traditional_fairshare_queue_remove(thread_t thread) +{ + queue_t q; + + simple_lock(&fs_lock); + q = &fs_runq.queue; + + if (FS_RUNQ == thread->runq) { + remqueue((queue_entry_t)thread); + SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, 
fs_runq.count); + fs_runq.count--; + + thread->runq = PROCESSOR_NULL; + simple_unlock(&fs_lock); + return (TRUE); + } + else { + /* + * The thread left the run queue before we could + * lock the run queue. + */ + assert(thread->runq == PROCESSOR_NULL); + simple_unlock(&fs_lock); + return (FALSE); + } +} + +#endif /* defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) */ + /* * run_queue_dequeue: * * Perform a dequeue operation on a run queue, * and return the resulting thread. * - * The run queue must be locked (see run_queue_remove() + * The run queue must be locked (see thread_run_queue_remove() * for more info), and not empty. */ -static thread_t +thread_t run_queue_dequeue( run_queue_t rq, integer_t options) @@ -1904,20 +2647,18 @@ run_queue_dequeue( queue_t queue = rq->queues + rq->highq; if (options & SCHED_HEADQ) { - thread = (thread_t)queue->next; - ((queue_entry_t)thread)->next->prev = queue; - queue->next = ((queue_entry_t)thread)->next; + thread = (thread_t)dequeue_head(queue); } else { - thread = (thread_t)queue->prev; - ((queue_entry_t)thread)->prev->next = queue; - queue->prev = ((queue_entry_t)thread)->prev; + thread = (thread_t)dequeue_tail(queue); } thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); rq->count--; - if (thread->sched_mode & TH_MODE_PREEMPT) - rq->urgency--; + if (SCHED(priority_is_urgent)(rq->highq)) { + rq->urgency--; assert(rq->urgency >= 0); + } if (queue_empty(queue)) { if (rq->highq != IDLEPRI) clrbit(MAXPRI - rq->highq, rq->bitmap); @@ -1928,54 +2669,146 @@ run_queue_dequeue( } /* - * realtime_queue_insert: + * run_queue_enqueue: * - * Enqueue a thread for realtime execution. + * Perform a enqueue operation on a run queue. + * + * The run queue must be locked (see thread_run_queue_remove() + * for more info). */ -static boolean_t -realtime_queue_insert( - thread_t thread) +boolean_t +run_queue_enqueue( + run_queue_t rq, + thread_t thread, + integer_t options) { - run_queue_t rq = &rt_runq; - queue_t queue = rq->queues + thread->sched_pri; - uint64_t deadline = thread->realtime.deadline; - boolean_t preempt = FALSE; - - simple_lock(&rt_lock); - + queue_t queue = rq->queues + thread->sched_pri; + boolean_t result = FALSE; + if (queue_empty(queue)) { enqueue_tail(queue, (queue_entry_t)thread); - + setbit(MAXPRI - thread->sched_pri, rq->bitmap); - if (thread->sched_pri > rq->highq) + if (thread->sched_pri > rq->highq) { rq->highq = thread->sched_pri; - preempt = TRUE; - } - else { - register thread_t entry = (thread_t)queue_first(queue); - - while (TRUE) { - if ( queue_end(queue, (queue_entry_t)entry) || - deadline < entry->realtime.deadline ) { - entry = (thread_t)queue_prev((queue_entry_t)entry); - break; - } - - entry = (thread_t)queue_next((queue_entry_t)entry); + result = TRUE; } - - if ((queue_entry_t)entry == queue) - preempt = TRUE; - - insque((queue_entry_t)thread, (queue_entry_t)entry); } + else + if (options & SCHED_TAILQ) + enqueue_tail(queue, (queue_entry_t)thread); + else + enqueue_head(queue, (queue_entry_t)thread); + + if (SCHED(priority_is_urgent)(thread->sched_pri)) + rq->urgency++; + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); + rq->count++; + + return (result); + +} + +/* + * run_queue_remove: + * + * Remove a specific thread from a runqueue. + * + * The run queue must be locked. 
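+ *
+ * Annotation (not in the original source): besides unlinking the
+ * thread, this keeps three invariants in sync: rq->count, the
+ * rq->urgency count of urgent-priority threads, and the
+ * rq->bitmap / rq->highq pair caching the highest non-empty
+ * priority level.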
+ */ +void +run_queue_remove( + run_queue_t rq, + thread_t thread) +{ + + remqueue((queue_entry_t)thread); + SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); + rq->count--; + if (SCHED(priority_is_urgent)(thread->sched_pri)) { + rq->urgency--; assert(rq->urgency >= 0); + } + + if (queue_empty(rq->queues + thread->sched_pri)) { + /* update run queue status */ + if (thread->sched_pri != IDLEPRI) + clrbit(MAXPRI - thread->sched_pri, rq->bitmap); + rq->highq = MAXPRI - ffsbit(rq->bitmap); + } + + thread->runq = PROCESSOR_NULL; +} + +/* + * fairshare_setrun: + * + * Dispatch a thread for round-robin execution. + * + * Thread must be locked. Associated pset must + * be locked, and is returned unlocked. + */ +static void +fairshare_setrun( + processor_t processor, + thread_t thread) +{ + processor_set_t pset = processor->processor_set; + + thread->chosen_processor = processor; + + SCHED(fairshare_enqueue)(thread); + + if (processor != current_processor()) + machine_signal_idle(processor); + + pset_unlock(pset); + +} + +/* + * realtime_queue_insert: + * + * Enqueue a thread for realtime execution. + */ +static boolean_t +realtime_queue_insert( + thread_t thread) +{ + queue_t queue = &rt_runq.queue; + uint64_t deadline = thread->realtime.deadline; + boolean_t preempt = FALSE; + + simple_lock(&rt_lock); + + if (queue_empty(queue)) { + enqueue_tail(queue, (queue_entry_t)thread); + preempt = TRUE; + } + else { + register thread_t entry = (thread_t)queue_first(queue); + + while (TRUE) { + if ( queue_end(queue, (queue_entry_t)entry) || + deadline < entry->realtime.deadline ) { + entry = (thread_t)queue_prev((queue_entry_t)entry); + break; + } + + entry = (thread_t)queue_next((queue_entry_t)entry); + } + + if ((queue_entry_t)entry == queue) + preempt = TRUE; + + insque((queue_entry_t)thread, (queue_entry_t)entry); + } + + thread->runq = RT_RUNQ; + SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count); + rt_runq.count++; + + simple_unlock(&rt_lock); - thread->runq = RT_RUNQ; - assert(thread->sched_mode & TH_MODE_PREEMPT); - rq->count++; rq->urgency++; - - simple_unlock(&rt_lock); - return (preempt); } @@ -1994,13 +2827,15 @@ realtime_setrun( { processor_set_t pset = processor->processor_set; + thread->chosen_processor = processor; + /* * Dispatch directly onto idle processor. */ - if (processor->state == PROCESSOR_IDLE) { - remqueue(&pset->idle_queue, (queue_entry_t)processor); - pset->idle_count--; - enqueue_head(&pset->active_queue, (queue_entry_t)processor); + if ( (thread->bound_processor == processor) + && processor->state == PROCESSOR_IDLE) { + remqueue((queue_entry_t)processor); + enqueue_tail(&pset->active_queue, (queue_entry_t)processor); processor->next_thread = thread; processor->deadline = thread->realtime.deadline; @@ -2013,8 +2848,11 @@ realtime_setrun( } if (realtime_queue_insert(thread)) { + int prstate = processor->state; if (processor == current_processor()) ast_on(AST_PREEMPT | AST_URGENT); + else if ((prstate == PROCESSOR_DISPATCHING) || (prstate == PROCESSOR_IDLE)) + machine_signal_idle(processor); else cause_ast_check(processor); } @@ -2022,6 +2860,14 @@ realtime_setrun( pset_unlock(pset); } +#if defined(CONFIG_SCHED_TRADITIONAL) + +static boolean_t +priority_is_urgent(int priority) +{ + return testbit(priority, sched_preempt_pri) ? TRUE : FALSE; +} + /* * processor_enqueue: * @@ -2031,7 +2877,7 @@ realtime_setrun( * Returns TRUE if a preemption is indicated based on the state * of the run queue. 
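 *
 * Annotation (not in the original source): "preemption indicated"
 * means the enqueued thread raised rq->highq, i.e. it is now the
 * highest-priority runnable thread on this queue, so the caller
 * should consider preempting the running thread.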
* - * The run queue must be locked (see run_queue_remove() + * The run queue must be locked (see thread_run_queue_remove() * for more info). */ static boolean_t @@ -2040,33 +2886,18 @@ processor_enqueue( thread_t thread, integer_t options) { - run_queue_t rq = &processor->runq; - queue_t queue = rq->queues + thread->sched_pri; - boolean_t result = FALSE; + run_queue_t rq = runq_for_processor(processor); + boolean_t result; - if (queue_empty(queue)) { - enqueue_tail(queue, (queue_entry_t)thread); - - setbit(MAXPRI - thread->sched_pri, rq->bitmap); - if (thread->sched_pri > rq->highq) { - rq->highq = thread->sched_pri; - result = TRUE; - } - } - else - if (options & SCHED_TAILQ) - enqueue_tail(queue, (queue_entry_t)thread); - else - enqueue_head(queue, (queue_entry_t)thread); - + result = run_queue_enqueue(rq, thread, options); thread->runq = processor; - if (thread->sched_mode & TH_MODE_PREEMPT) - rq->urgency++; - rq->count++; + runq_consider_incr_bound_count(processor, thread); return (result); } +#endif /* CONFIG_SCHED_TRADITIONAL */ + /* * processor_setrun: * @@ -2085,13 +2916,16 @@ processor_setrun( processor_set_t pset = processor->processor_set; ast_t preempt; + thread->chosen_processor = processor; + /* * Dispatch directly onto idle processor. */ - if (processor->state == PROCESSOR_IDLE) { - remqueue(&pset->idle_queue, (queue_entry_t)processor); - pset->idle_count--; - enqueue_head(&pset->active_queue, (queue_entry_t)processor); + if ( (SCHED(direct_dispatch_to_idle_processors) || + thread->bound_processor == processor) + && processor->state == PROCESSOR_IDLE) { + remqueue((queue_entry_t)processor); + enqueue_tail(&pset->active_queue, (queue_entry_t)processor); processor->next_thread = thread; processor->deadline = UINT64_MAX; @@ -2106,30 +2940,33 @@ processor_setrun( /* * Set preemption mode. */ - if (thread->sched_mode & TH_MODE_PREEMPT) + if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) + preempt = (AST_PREEMPT | AST_URGENT); + else if(processor->active_thread && thread_eager_preemption(processor->active_thread)) preempt = (AST_PREEMPT | AST_URGENT); else - if (thread->sched_mode & TH_MODE_TIMESHARE && thread->priority < BASEPRI_BACKGROUND) + if ((thread->sched_mode == TH_MODE_TIMESHARE) && thread->sched_pri < thread->priority) preempt = AST_NONE; else preempt = (options & SCHED_PREEMPT)? 
 /*
  *    processor_setrun:
  *
@@ -2085,13 +2916,16 @@ processor_setrun(
     processor_set_t     pset = processor->processor_set;
     ast_t               preempt;
 
+    thread->chosen_processor = processor;
+
     /*
      *    Dispatch directly onto idle processor.
      */
-    if (processor->state == PROCESSOR_IDLE) {
-        remqueue(&pset->idle_queue, (queue_entry_t)processor);
-        pset->idle_count--;
-        enqueue_head(&pset->active_queue, (queue_entry_t)processor);
+    if ( (SCHED(direct_dispatch_to_idle_processors) ||
+          thread->bound_processor == processor)
+                    && processor->state == PROCESSOR_IDLE) {
+        remqueue((queue_entry_t)processor);
+        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
 
         processor->next_thread = thread;
         processor->deadline = UINT64_MAX;
@@ -2106,30 +2940,33 @@ processor_setrun(
     /*
      *    Set preemption mode.
      */
-    if (thread->sched_mode & TH_MODE_PREEMPT)
+    if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
+        preempt = (AST_PREEMPT | AST_URGENT);
+    else if(processor->active_thread && thread_eager_preemption(processor->active_thread))
         preempt = (AST_PREEMPT | AST_URGENT);
     else
-    if (thread->sched_mode & TH_MODE_TIMESHARE && thread->priority < BASEPRI_BACKGROUND)
+    if ((thread->sched_mode == TH_MODE_TIMESHARE) && thread->sched_pri < thread->priority)
         preempt = AST_NONE;
     else
         preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
 
-    if (!processor_enqueue(processor, thread, options))
+    if (!SCHED(processor_enqueue)(processor, thread, options))
         preempt = AST_NONE;
 
-    pset_hint_high(pset, processor);
-
     if (preempt != AST_NONE) {
         if (processor == current_processor()) {
-            thread_t    self = processor->active_thread;
-
-            if (csw_needed(self, processor))
+            if (csw_check(processor) != AST_NONE)
                 ast_on(preempt);
         }
         else
+        if ( processor->state == PROCESSOR_IDLE || processor->state == PROCESSOR_DISPATCHING) {
+            machine_signal_idle(processor);
+        }
+        else
         if ( (processor->state == PROCESSOR_RUNNING ||
               processor->state == PROCESSOR_SHUTDOWN) &&
-            thread->sched_pri >= processor->current_pri ) {
+            (thread->sched_pri >= processor->current_pri ||
+             processor->current_thmode == TH_MODE_FAIRSHARE)) {
             cause_ast_check(processor);
         }
     }
@@ -2138,10 +2975,112 @@ processor_setrun(
             thread->sched_pri >= processor->current_pri ) {
             cause_ast_check(processor);
         }
+        else
+        if ( processor->state == PROCESSOR_IDLE &&
+             processor != current_processor() ) {
+            machine_signal_idle(processor);
+        }
 
     pset_unlock(pset);
 }
 
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
+static boolean_t
+processor_queue_empty(processor_t processor)
+{
+    return runq_for_processor(processor)->count == 0;
+
+}
+
+static boolean_t
+sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
+{
+    processor_set_t pset = processor->processor_set;
+    int count = runq_for_processor(processor)->count;
+
+    /*
+     * The pset runq contains the count of all runnable threads
+     * for all processors in the pset. However, for threads that
+     * are bound to another processor, the current "processor"
+     * is not eligible to execute the thread. So we only
+     * include bound threads that are bound to the current
+     * "processor". This allows the processor to idle when the
+     * count of eligible threads drops to 0, even if there's
+     * a runnable thread bound to a different processor in the
+     * shared runq.
+     */
+
+    count -= pset->pset_runq_bound_count;
+    count += processor->runq_bound_count;
+
+    return count == 0;
+}
+
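/*
 * [Editor's sketch -- illustrative only, not part of this patch.]
 * The eligibility arithmetic in
 * sched_traditional_with_pset_runqueue_processor_queue_empty() above:
 * with a shared per-pset run queue, a processor may idle only when no
 * unbound thread and no thread bound to *it* remains.  Field names are
 * stand-ins for the pset/processor counters.
 */
#include <stdbool.h>

struct pset_counts {
    int shared_runq_count;      /* all runnable threads in the pset runq */
    int pset_bound_count;       /* of those, bound to *some* processor   */
};

static bool
shared_runq_empty_for(const struct pset_counts *ps, int my_bound_count)
{
    /* Subtract every bound thread, then add back the ones bound to us:
     * e.g. 3 runnable, 2 bound elsewhere, 0 bound here => 1 eligible. */
    int eligible = ps->shared_runq_count - ps->pset_bound_count
                 + my_bound_count;
    return eligible == 0;
}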
+static ast_t
+processor_csw_check(processor_t processor)
+{
+    run_queue_t runq;
+
+    assert(processor->active_thread != NULL);
+
+    runq = runq_for_processor(processor);
+    if (runq->highq > processor->current_pri) {
+        if (runq->urgency > 0)
+            return (AST_PREEMPT | AST_URGENT);
+
+        if (processor->active_thread && thread_eager_preemption(processor->active_thread))
+            return (AST_PREEMPT | AST_URGENT);
+
+        return AST_PREEMPT;
+    }
+
+    return AST_NONE;
+}
+
+static boolean_t
+processor_queue_has_priority(processor_t    processor,
+                             int            priority,
+                             boolean_t      gte)
+{
+    if (gte)
+        return runq_for_processor(processor)->highq >= priority;
+    else
+        return runq_for_processor(processor)->highq > priority;
+}
+
+static boolean_t
+should_current_thread_rechoose_processor(processor_t processor)
+{
+    return (processor->current_pri < BASEPRI_RTQUEUES
+            && processor->processor_meta != PROCESSOR_META_NULL
+            && processor->processor_meta->primary != processor);
+}
+
+static int
+sched_traditional_processor_runq_count(processor_t processor)
+{
+    return runq_for_processor(processor)->count;
+}
+
+
+static uint64_t
+sched_traditional_processor_runq_stats_count_sum(processor_t processor)
+{
+    return runq_for_processor(processor)->runq_stats.count_sum;
+}
+
+static uint64_t
+sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
+{
+    if (processor->cpu_id == processor->processor_set->cpu_set_low)
+        return runq_for_processor(processor)->runq_stats.count_sum;
+    else
+        return 0ULL;
+}
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
 #define next_pset(p)    (((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets)
 
 /*
@@ -2161,15 +3100,16 @@ choose_next_pset(
     do {
         nset = next_pset(nset);
-    } while (nset->processor_count < 1 && nset != pset);
+    } while (nset->online_processor_count < 1 && nset != pset);
 
-    return ((nset != pset)? nset: pset);
+    return (nset);
 }
 
 /*
  *    choose_processor:
  *
  *    Choose a processor for the thread, beginning at
+ *    the pset.  Accepts an optional processor hint in
  *    the pset.
  *
  *    Returns a processor, possibly from a different pset.
@@ -2177,13 +3117,41 @@ choose_next_pset(
  *    The thread must be locked.  The pset must be locked,
  *    and the resulting pset is locked on return.
  */
-static processor_t
+processor_t
 choose_processor(
     processor_set_t     pset,
+    processor_t         processor,
     thread_t            thread)
 {
     processor_set_t     nset, cset = pset;
-    processor_t         processor;
+    processor_meta_t    pmeta = PROCESSOR_META_NULL;
+    processor_t         mprocessor;
+
+    /*
+     *    Prefer the hinted processor, when appropriate.
+     */
+
+    if (processor != PROCESSOR_NULL) {
+        if (processor->processor_meta != PROCESSOR_META_NULL)
+            processor = processor->processor_meta->primary;
+    }
+
+    mprocessor = machine_choose_processor(pset, processor);
+    if (mprocessor != PROCESSOR_NULL)
+        processor = mprocessor;
+
+    if (processor != PROCESSOR_NULL) {
+        if (processor->processor_set != pset ||
+            processor->state == PROCESSOR_INACTIVE ||
+            processor->state == PROCESSOR_SHUTDOWN ||
+            processor->state == PROCESSOR_OFF_LINE)
+            processor = PROCESSOR_NULL;
+        else
+        if (processor->state == PROCESSOR_IDLE ||
+            ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
+             (processor->current_pri < BASEPRI_RTQUEUES)))
+            return (processor);
+    }
 
     /*
      *    Iterate through the processor sets to locate
@@ -2197,34 +3165,101 @@ choose_processor(
             return ((processor_t)queue_first(&cset->idle_queue));
 
         if (thread->sched_pri >= BASEPRI_RTQUEUES) {
-            /*
-             *    For an RT thread, iterate through active processors, first fit.
-             */
+            integer_t lowest_priority = MAXPRI + 1;
+            integer_t lowest_unpaired = MAXPRI + 1;
+            uint64_t  furthest_deadline = 1;
+            processor_t lp_processor = PROCESSOR_NULL;
+            processor_t lp_unpaired = PROCESSOR_NULL;
+            processor_t fd_processor = PROCESSOR_NULL;
+
+            lp_processor = cset->low_pri;
+            /* Consider hinted processor */
+            if (lp_processor != PROCESSOR_NULL &&
+                ((lp_processor->processor_meta == PROCESSOR_META_NULL) ||
+                 ((lp_processor == lp_processor->processor_meta->primary) &&
+                  !queue_empty(&lp_processor->processor_meta->idle_queue))) &&
+                lp_processor->state != PROCESSOR_INACTIVE &&
+                lp_processor->state != PROCESSOR_SHUTDOWN &&
+                lp_processor->state != PROCESSOR_OFF_LINE &&
+                (lp_processor->current_pri < thread->sched_pri))
+                return lp_processor;
+
             processor = (processor_t)queue_first(&cset->active_queue);
             while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
-                if (thread->sched_pri > processor->current_pri ||
-                    thread->realtime.deadline < processor->deadline)
-                    return (processor);
+                /* Discover the processor executing the
+                 * thread with the lowest priority within
+                 * this pset, or the one with the furthest
+                 * deadline
+                 */
+                integer_t cpri = processor->current_pri;
+                if (cpri < lowest_priority) {
+                    lowest_priority = cpri;
+                    lp_processor = processor;
+                }
+
+                if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
+                    furthest_deadline = processor->deadline;
+                    fd_processor = processor;
+                }
+
+                if (processor->processor_meta != PROCESSOR_META_NULL &&
+                    !queue_empty(&processor->processor_meta->idle_queue)) {
+                    if (cpri < lowest_unpaired) {
+                        lowest_unpaired = cpri;
+                        lp_unpaired = processor;
+                        pmeta = processor->processor_meta;
+                    }
+                    else
+                    if (pmeta == PROCESSOR_META_NULL)
+                        pmeta = processor->processor_meta;
+                }
 
                 processor = (processor_t)queue_next((queue_entry_t)processor);
             }
+
+            if (thread->sched_pri > lowest_unpaired)
+                return lp_unpaired;
+
+            if (pmeta != PROCESSOR_META_NULL)
+                return ((processor_t)queue_first(&pmeta->idle_queue));
+
+            if (thread->sched_pri > lowest_priority)
+                return lp_processor;
+            if (thread->realtime.deadline < furthest_deadline)
+                return fd_processor;
+
+            processor = PROCESSOR_NULL;
         }
         else {
             /*
-             *    Choose the low hint processor in the processor set if available.
+             *    Check any hinted processors in the processor set if available.
              */
-            processor = cset->low_hint;
-            if (processor != PROCESSOR_NULL &&
-                processor->state != PROCESSOR_SHUTDOWN && processor->state != PROCESSOR_OFF_LINE)
-                return (processor);
+            if (cset->low_pri != PROCESSOR_NULL && cset->low_pri->state != PROCESSOR_INACTIVE &&
+                cset->low_pri->state != PROCESSOR_SHUTDOWN && cset->low_pri->state != PROCESSOR_OFF_LINE &&
+                    (processor == PROCESSOR_NULL ||
+                     (thread->sched_pri > BASEPRI_DEFAULT && cset->low_pri->current_pri < thread->sched_pri))) {
+                processor = cset->low_pri;
+            }
+            else
+            if (cset->low_count != PROCESSOR_NULL && cset->low_count->state != PROCESSOR_INACTIVE &&
+                cset->low_count->state != PROCESSOR_SHUTDOWN && cset->low_count->state != PROCESSOR_OFF_LINE &&
+                    (processor == PROCESSOR_NULL || (thread->sched_pri <= BASEPRI_DEFAULT &&
+                     SCHED(processor_runq_count)(cset->low_count) < SCHED(processor_runq_count)(processor)))) {
+                processor = cset->low_count;
+            }
 
             /*
-             *    Choose any active processor if the hint was invalid.
+             *    Otherwise, choose an available processor in the set.
              */
-            processor = (processor_t)dequeue_head(&cset->active_queue);
-            if (processor != PROCESSOR_NULL) {
-                enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
-                return (processor);
+            if (processor == PROCESSOR_NULL) {
+                processor = (processor_t)dequeue_head(&cset->active_queue);
+                if (processor != PROCESSOR_NULL)
+                    enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
+            }
+
+            if (processor != PROCESSOR_NULL && pmeta == PROCESSOR_META_NULL) {
+                if (processor->processor_meta != PROCESSOR_META_NULL &&
+                        !queue_empty(&processor->processor_meta->idle_queue))
+                    pmeta = processor->processor_meta;
+            }
         }
     }
 
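/*
 * [Editor's sketch -- illustrative only, not part of this patch.]
 * The realtime candidate scan inside choose_processor() above: one pass
 * over the active processors records both the lowest-priority processor
 * (cheapest to preempt) and, among processors already running realtime
 * work, the one with the furthest deadline.  The struct and names are
 * invented; the meta/idle-pair handling is omitted for clarity.
 */
#include <limits.h>
#include <stddef.h>
#include <stdint.h>

struct cpu {
    int      current_pri;
    uint64_t deadline;          /* UINT64_MAX when not running RT work */
};

struct rt_choice {
    struct cpu *lowest_pri;     /* best victim by priority              */
    struct cpu *furthest_dl;    /* best victim among RT processors      */
};

static struct rt_choice
scan_for_rt(struct cpu cpus[], size_t n, int rt_base_pri)
{
    struct rt_choice c = { NULL, NULL };
    int lowest = INT_MAX;
    uint64_t furthest = 0;

    for (size_t i = 0; i < n; i++) {
        if (cpus[i].current_pri < lowest) {
            lowest = cpus[i].current_pri;
            c.lowest_pri = &cpus[i];
        }
        /* Only processors already running realtime threads compete on
         * deadline; a later deadline is cheaper to displace. */
        if (cpus[i].current_pri >= rt_base_pri &&
            cpus[i].deadline > furthest) {
            furthest = cpus[i].deadline;
            c.furthest_dl = &cpus[i];
        }
    }
    return c;
}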
@@ -2242,16 +3277,63 @@ choose_processor(
     } while (nset != pset);
 
     /*
-     *    If all else fails choose the current processor,
-     *    this routine must return a running processor.
+     *    Make sure that we pick a running processor,
+     *    and that the correct processor set is locked.
      */
-    processor = current_processor();
-    if (cset != processor->processor_set) {
-        pset_unlock(cset);
+    do {
+        if (pmeta != PROCESSOR_META_NULL) {
+            if (cset != pmeta->primary->processor_set) {
+                pset_unlock(cset);
 
-        cset = processor->processor_set;
-        pset_lock(cset);
-    }
+                cset = pmeta->primary->processor_set;
+                pset_lock(cset);
+            }
+
+            if (!queue_empty(&pmeta->idle_queue))
+                return ((processor_t)queue_first(&pmeta->idle_queue));
+
+            pmeta = PROCESSOR_META_NULL;
+        }
+
+        /*
+         *    If we haven't been able to choose a processor,
+         *    pick the boot processor and return it.
+         */
+        if (processor == PROCESSOR_NULL) {
+            processor = master_processor;
+
+            /*
+             *    Check that the correct processor set is
+             *    returned locked.
+             */
+            if (cset != processor->processor_set) {
+                pset_unlock(cset);
+
+                cset = processor->processor_set;
+                pset_lock(cset);
+            }
+
+            return (processor);
+        }
+
+        /*
+         *    Check that the processor set for the chosen
+         *    processor is locked.
+         */
+        if (cset != processor->processor_set) {
+            pset_unlock(cset);
+
+            cset = processor->processor_set;
+            pset_lock(cset);
+        }
+
+        /*
+         *    We must verify that the chosen processor is still available.
+         */
+        if (processor->state == PROCESSOR_INACTIVE ||
+            processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
+            processor = PROCESSOR_NULL;
+    } while (processor == PROCESSOR_NULL);
 
     return (processor);
 }
@@ -2280,8 +3362,8 @@ thread_setrun(
 
     /*
      *    Update priority if needed.
      */
-    if (thread->sched_stamp != sched_tick)
-        update_priority(thread);
+    if (SCHED(can_update_priority)(thread))
+        SCHED(update_priority)(thread);
 
     assert(thread->runq == PROCESSOR_NULL);
 
@@ -2296,7 +3378,7 @@ thread_setrun(
             pset = thread->affinity_set->aset_pset;
             pset_lock(pset);
 
-            processor = choose_processor(pset, thread);
+            processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
         }
         else
         if (thread->last_processor != PROCESSOR_NULL) {
@@ -2306,55 +3388,32 @@ thread_setrun(
             processor = thread->last_processor;
             pset = processor->processor_set;
             pset_lock(pset);
+            processor = SCHED(choose_processor)(pset, processor, thread);
 
-            /*
-             *    Choose a different processor in certain cases.
-             */
-            if (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
-                processor = choose_processor(pset, thread);
-            else
-            if (thread->sched_pri >= BASEPRI_RTQUEUES) {
-                /*
-                 *    If the processor is executing an RT thread with
-                 *    an earlier deadline, choose another.
-                 */
-                if (thread->sched_pri <= processor->current_pri ||
-                    thread->realtime.deadline >= processor->deadline)
-                    processor = choose_processor(pset, thread);
-            }
-            else
-            if (processor->state != PROCESSOR_IDLE && pset->idle_count > 0) {
-                processor = choose_processor(pset, thread);
-            }
-            else {
-                processor_set_t nset = choose_next_pset(pset);
-
-                /*
-                 *    Bump into a lesser loaded processor set if appropriate.
-                 */
-                if (pset != nset && (nset->low_hint == PROCESSOR_NULL ||
-                    (pset->idle_count == 0 && nset->idle_count > 0) ||
-                     processor->runq.count > nset->low_hint->runq.count)) {
-                    pset_unlock(pset);
-
-                    pset = nset;
-                    pset_lock(pset);
-
-                    processor = choose_processor(pset, thread);
-                }
+            if ((thread->last_processor != processor) && (thread->last_processor != PROCESSOR_NULL)) {
+                KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LPA_BROKEN)|DBG_FUNC_NONE,
+                    (uintptr_t)thread_tid(thread), (uintptr_t)thread->last_processor->cpu_id, (uintptr_t)processor->cpu_id, thread->last_processor->state, 0);
             }
+
         }
         else {
             /*
              *    No Affinity case:
              *
-             *    Choose a processor from the current processor set.
+             *    Utilize a per task hint to spread threads
+             *    among the available processor sets.
              */
-            processor = current_processor();
-            pset = processor->processor_set;
+            task_t task = thread->task;
+
+            pset = task->pset_hint;
+            if (pset == PROCESSOR_SET_NULL)
+                pset = current_processor()->processor_set;
+
+            pset = choose_next_pset(pset);
             pset_lock(pset);
 
-            processor = choose_processor(pset, thread);
+            processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
+            task->pset_hint = processor->processor_set;
         }
     }
     else {
@@ -2373,15 +3432,31 @@ thread_setrun(
      */
     if (thread->sched_pri >= BASEPRI_RTQUEUES)
         realtime_setrun(processor, thread);
+    else if (thread->sched_mode == TH_MODE_FAIRSHARE)
+        fairshare_setrun(processor, thread);
     else
         processor_setrun(processor, thread, options);
 }
 
+processor_set_t
+task_choose_pset(
+    task_t      task)
+{
+    processor_set_t     pset = task->pset_hint;
+
+    if (pset != PROCESSOR_SET_NULL)
+        pset = choose_next_pset(pset);
+
+    return (pset);
+}
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
 /*
  *    processor_queue_shutdown:
  *
- *    Shutdown a processor run queue by moving
- *    non-bound threads to the current processor.
+ *    Shutdown a processor run queue by
+ *    re-dispatching non-bound threads.
  *
  *    Associated pset must be locked, and is
  *    returned unlocked.
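/*
 * [Editor's sketch -- illustrative only, not part of this patch.]
 * The per-task pset hint used in the "No Affinity" arm of thread_setrun()
 * above: each placement starts from the pset the task used last and
 * advances around the ring, so a multithreaded task spreads across
 * processor sets instead of piling onto the current one.  The ring and
 * task types are invented; the real code updates the hint to the pset of
 * the processor that choose_processor() finally returns.
 */
struct pset_ring {
    struct pset_ring *next;     /* circular list of processor sets */
};

struct task_hint {
    struct pset_ring *pset_hint;    /* last pset a thread was sent to */
};

static struct pset_ring *
choose_pset_for(struct task_hint *t, struct pset_ring *current)
{
    struct pset_ring *start  = t->pset_hint ? t->pset_hint : current;
    struct pset_ring *chosen = start->next;     /* advance the ring */

    t->pset_hint = chosen;      /* remembered for the next thread */
    return chosen;
}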
@@ -2391,7 +3466,7 @@ processor_queue_shutdown(
     processor_t         processor)
 {
     processor_set_t     pset = processor->processor_set;
-    run_queue_t         rq = &processor->runq;
+    run_queue_t         rq = runq_for_processor(processor);
     queue_t             queue = rq->queues + rq->highq;
     int                 pri = rq->highq, count = rq->count;
     thread_t            next, thread;
@@ -2404,13 +3479,16 @@ processor_queue_shutdown(
     while (!queue_end(queue, (queue_entry_t)thread)) {
         next = (thread_t)queue_next((queue_entry_t)thread);
 
-        if (thread->bound_processor != processor) {
-            remqueue(queue, (queue_entry_t)thread);
+        if (thread->bound_processor == PROCESSOR_NULL) {
+            remqueue((queue_entry_t)thread);
 
             thread->runq = PROCESSOR_NULL;
+            SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
+            runq_consider_decr_bound_count(processor, thread);
             rq->count--;
-            if (thread->sched_mode & TH_MODE_PREEMPT)
-                rq->urgency--;
+            if (SCHED(priority_is_urgent)(pri)) {
+                rq->urgency--; assert(rq->urgency >= 0);
+            }
             if (queue_empty(queue)) {
                 if (pri != IDLEPRI)
                     clrbit(MAXPRI - pri, rq->bitmap);
@@ -2429,83 +3507,59 @@ processor_queue_shutdown(
 
     pset_unlock(pset);
 
-    processor = current_processor();
-    pset = processor->processor_set;
-
     while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
         thread_lock(thread);
-        thread->last_processor = PROCESSOR_NULL;
-
-        pset_lock(pset);
-
-        processor_enqueue(processor, thread, SCHED_TAILQ);
-
-        pset_unlock(pset);
+        thread_setrun(thread, SCHED_TAILQ);
 
         thread_unlock(thread);
     }
 }
 
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
 /*
- *    Check for a possible preemption point in
- *    the (current) thread.
+ *    Check for a preemption point in
+ *    the current context.
  *
  *    Called at splsched.
  */
 ast_t
 csw_check(
-    thread_t        thread,
     processor_t     processor)
 {
-    int             current_pri = thread->sched_pri;
     ast_t           result = AST_NONE;
-    run_queue_t     runq;
 
     if (first_timeslice(processor)) {
-        runq = &rt_runq;
-        if (runq->highq >= BASEPRI_RTQUEUES)
+        if (rt_runq.count > 0)
             return (AST_PREEMPT | AST_URGENT);
 
-        if (runq->highq > current_pri) {
-            if (runq->urgency > 0)
-                return (AST_PREEMPT | AST_URGENT);
-
-            result |= AST_PREEMPT;
-        }
-
-        runq = &processor->runq;
-        if (runq->highq > current_pri) {
-            if (runq->urgency > 0)
-                return (AST_PREEMPT | AST_URGENT);
-
-            result |= AST_PREEMPT;
-        }
+        result |= SCHED(processor_csw_check)(processor);
+        if (result & AST_URGENT)
+            return result;
     }
     else {
-        runq = &rt_runq;
-        if (runq->highq >= current_pri) {
-            if (runq->urgency > 0)
-                return (AST_PREEMPT | AST_URGENT);
-
-            result |= AST_PREEMPT;
-        }
-
-        runq = &processor->runq;
-        if (runq->highq >= current_pri) {
-            if (runq->urgency > 0)
-                return (AST_PREEMPT | AST_URGENT);
+        if (rt_runq.count > 0 && BASEPRI_RTQUEUES >= processor->current_pri)
+            return (AST_PREEMPT | AST_URGENT);
 
-            result |= AST_PREEMPT;
-        }
+        result |= SCHED(processor_csw_check)(processor);
+        if (result & AST_URGENT)
+            return result;
     }
 
     if (result != AST_NONE)
         return (result);
 
-    if (thread->state & TH_SUSP)
-        result |= AST_PREEMPT;
+    if (SCHED(should_current_thread_rechoose_processor)(processor))
+        return (AST_PREEMPT);
+
+    if (machine_processor_is_inactive(processor))
+        return (AST_PREEMPT);
 
-    return (result);
+    if (processor->active_thread->state & TH_SUSP)
+        return (AST_PREEMPT);
+
+    return (AST_NONE);
 }
 
 /*
@@ -2522,16 +3576,7 @@ set_sched_pri(
     thread_t        thread,
     int             priority)
 {
-    boolean_t       removed = run_queue_remove(thread);
-
-    if ( !(thread->sched_mode & TH_MODE_TIMESHARE) &&
-         (priority >= BASEPRI_PREEMPT ||
-          (thread->task_priority < MINPRI_KERNEL &&
-           thread->task_priority >= BASEPRI_BACKGROUND &&
-           priority > thread->task_priority) ) )
-        thread->sched_mode |= TH_MODE_PREEMPT;
-    else
-        thread->sched_mode &= ~TH_MODE_PREEMPT;
+    boolean_t       removed = thread_run_queue_remove(thread);
 
     thread->sched_pri = priority;
     if (removed)
@@ -2541,11 +3586,12 @@ set_sched_pri(
         processor_t     processor = thread->last_processor;
 
         if (thread == current_thread()) {
-            ast_t       preempt = csw_check(thread, processor);
+            ast_t       preempt;
 
-            if (preempt != AST_NONE)
-                ast_on(preempt);
             processor->current_pri = priority;
+            processor->current_thmode = thread->sched_mode;
+            if ((preempt = csw_check(processor)) != AST_NONE)
+                ast_on(preempt);
         }
         else
         if ( processor != PROCESSOR_NULL &&
@@ -2584,8 +3630,48 @@ run_queue_check(
 
 #endif /* DEBUG */
 
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
+/* locks the runqueue itself */
+
+static boolean_t
+processor_queue_remove(
+    processor_t     processor,
+    thread_t        thread)
+{
+    void *          rqlock;
+    run_queue_t     rq;
+
+    rqlock = &processor->processor_set->sched_lock;
+    rq = runq_for_processor(processor);
+
+    simple_lock(rqlock);
+    if (processor == thread->runq) {
+        /*
+         *    Thread is on a run queue and we have a lock on
+         *    that run queue.
+         */
+        runq_consider_decr_bound_count(processor, thread);
+        run_queue_remove(rq, thread);
+    }
+    else {
+        /*
+         *    The thread left the run queue before we could
+         *    lock the run queue.
+         */
+        assert(thread->runq == PROCESSOR_NULL);
+        processor = PROCESSOR_NULL;
+    }
+
+    simple_unlock(rqlock);
+
+    return (processor != PROCESSOR_NULL);
+}
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
 /*
- *    run_queue_remove:
+ *    thread_run_queue_remove:
  *
  *    Remove a thread from a current run queue and
  *    return TRUE if successful.
@@ -2593,7 +3679,7 @@ run_queue_check(
  *    Thread must be locked.
  */
 boolean_t
-run_queue_remove(
+thread_run_queue_remove(
     thread_t        thread)
 {
     processor_t     processor = thread->runq;
@@ -2605,41 +3691,32 @@ run_queue_remove(
      *    and removed.
      */
     if (processor != PROCESSOR_NULL) {
-        void *          rqlock;
-        run_queue_t     rq;
+        queue_t         q;
 
         /*
         *    The processor run queues are locked by the
         *    processor set.  Real-time priorities use a
         *    global queue with a dedicated lock.
         */
-        if (thread->sched_pri < BASEPRI_RTQUEUES) {
-            rqlock = &processor->processor_set->sched_lock;
-            rq = &processor->runq;
+        if (thread->sched_mode == TH_MODE_FAIRSHARE) {
+            return SCHED(fairshare_queue_remove)(thread);
         }
-        else {
-            rqlock = &rt_lock; rq = &rt_runq;
+
+        if (thread->sched_pri < BASEPRI_RTQUEUES) {
+            return SCHED(processor_queue_remove)(processor, thread);
         }
 
-        simple_lock(rqlock);
+        simple_lock(&rt_lock);
+        q = &rt_runq.queue;
 
         if (processor == thread->runq) {
             /*
             *    Thread is on a run queue and we have a lock on
             *    that run queue.
             */
-            remqueue(&rq->queues[0], (queue_entry_t)thread);
-            rq->count--;
-            if (thread->sched_mode & TH_MODE_PREEMPT)
-                rq->urgency--;
-            assert(rq->urgency >= 0);
-
-            if (queue_empty(rq->queues + thread->sched_pri)) {
-                /* update run queue status */
-                if (thread->sched_pri != IDLEPRI)
-                    clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
-                rq->highq = MAXPRI - ffsbit(rq->bitmap);
-            }
+            remqueue((queue_entry_t)thread);
+            SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);
+            rt_runq.count--;
 
             thread->runq = PROCESSOR_NULL;
         }
@@ -2652,97 +3729,45 @@ thread_run_queue_remove(
             processor = PROCESSOR_NULL;
         }
 
-        simple_unlock(rqlock);
+        simple_unlock(&rt_lock);
     }
 
     return (processor != PROCESSOR_NULL);
 }
 
-/*
- *    choose_thread:
- *
- *    Choose a thread to execute from the run queues
- *    and return it.  May steal a thread from another
- *    processor.
- *
- *    Called with pset scheduling lock and rt lock held,
- *    released on return.
- */
-static thread_t
-choose_thread(
-    processor_t        processor)
-{
-    processor_set_t    pset = processor->processor_set;
-    thread_t           thread;
-
-    if (processor->runq.count > 0 && processor->runq.highq >= rt_runq.highq) {
-        simple_unlock(&rt_lock);
-
-        pset_hint_low(pset, processor);
-
-        if (pset->high_hint != PROCESSOR_NULL) {
-            if (processor != pset->high_hint) {
-                if (processor->runq.count >= pset->high_hint->runq.count)
-                    pset->high_hint = processor;
-                else
-                if (pset->high_hint->runq.highq > processor->runq.highq) {
-                    thread = steal_thread(pset->high_hint);
-                    if (thread != THREAD_NULL) {
-                        processor->deadline = UINT64_MAX;
-                        pset_unlock(pset);
-
-                        return (thread);
-                    }
-                }
-            }
-        }
-        else
-            pset->high_hint = processor;
-
-        thread = run_queue_dequeue(&processor->runq, SCHED_HEADQ);
-
-        processor->deadline = UINT64_MAX;
-        pset_unlock(pset);
-
-        return (thread);
-    }
-
-    thread = run_queue_dequeue(&rt_runq, SCHED_HEADQ);
-    simple_unlock(&rt_lock);
-
-    processor->deadline = thread->realtime.deadline;
-    pset_unlock(pset);
-
-    return (thread);
-}
+#if defined(CONFIG_SCHED_TRADITIONAL)
 
 /*
- *    steal_thread:
+ *    steal_processor_thread:
  *
- *    Steal a thread from a processor and return it.
+ *    Locate a thread to steal from the processor and
+ *    return it.
  *
  *    Associated pset must be locked.  Returns THREAD_NULL
  *    on failure.
  */
 static thread_t
-steal_thread(
+steal_processor_thread(
     processor_t        processor)
 {
-    run_queue_t        rq = &processor->runq;
+    run_queue_t        rq = runq_for_processor(processor);
     queue_t            queue = rq->queues + rq->highq;
     int                pri = rq->highq, count = rq->count;
-    thread_t           thread = THREAD_NULL;
+    thread_t           thread;
 
     while (count > 0) {
         thread = (thread_t)queue_first(queue);
         while (!queue_end(queue, (queue_entry_t)thread)) {
-            if (thread->bound_processor != processor) {
-                remqueue(queue, (queue_entry_t)thread);
+            if (thread->bound_processor == PROCESSOR_NULL) {
+                remqueue((queue_entry_t)thread);
 
                 thread->runq = PROCESSOR_NULL;
+                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
+                runq_consider_decr_bound_count(processor, thread);
                 rq->count--;
-                if (thread->sched_mode & TH_MODE_PREEMPT)
-                    rq->urgency--;
+                if (SCHED(priority_is_urgent)(pri)) {
+                    rq->urgency--; assert(rq->urgency >= 0);
+                }
                 if (queue_empty(queue)) {
                     if (pri != IDLEPRI)
                         clrbit(MAXPRI - pri, rq->bitmap);
@@ -2762,6 +3787,106 @@ steal_processor_thread(
 
     return (THREAD_NULL);
 }
 
+/*
+ *    Locate and steal a thread, beginning
+ *    at the pset.
+ *
+ *    The pset must be locked, and is returned
+ *    unlocked.
+ *
+ *    Returns the stolen thread, or THREAD_NULL on
+ *    failure.
+ */
+static thread_t
+steal_thread(
+    processor_set_t        pset)
+{
+    processor_set_t        nset, cset = pset;
+    processor_t            processor;
+    thread_t               thread;
+
+    do {
+        processor = (processor_t)queue_first(&cset->active_queue);
+        while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
+            if (runq_for_processor(processor)->count > 0) {
+                thread = steal_processor_thread(processor);
+                if (thread != THREAD_NULL) {
+                    remqueue((queue_entry_t)processor);
+                    enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
+
+                    pset_unlock(cset);
+
+                    return (thread);
+                }
+            }
+
+            processor = (processor_t)queue_next((queue_entry_t)processor);
+        }
+
+        nset = next_pset(cset);
+
+        if (nset != pset) {
+            pset_unlock(cset);
+
+            cset = nset;
+            pset_lock(cset);
+        }
+    } while (nset != pset);
+
+    pset_unlock(cset);
+
+    return (THREAD_NULL);
+}
+
+static thread_t steal_thread_disabled(
+    processor_set_t        pset)
+{
+    pset_unlock(pset);
+
+    return (THREAD_NULL);
+}
+
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+
+int
+thread_get_urgency(uint64_t *rt_period, uint64_t *rt_deadline)
+{
+    processor_t    processor;
+    thread_t       thread;
+
+    processor = current_processor();
+
+    thread = processor->next_thread;
+
+    if (thread != NULL) {
+        if (thread->sched_mode == TH_MODE_REALTIME) {
+
+            if (rt_period != NULL)
+                *rt_period = thread->realtime.period;
+            if (rt_deadline != NULL)
+                *rt_deadline = thread->realtime.deadline;
+
+            KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_REAL_TIME, thread->realtime.period,
+                (thread->realtime.deadline >> 32), thread->realtime.deadline, 0);
+
+            return (THREAD_URGENCY_REAL_TIME);
+        } else if ((thread->sched_pri <= MAXPRI_THROTTLE) &&
+                   (thread->priority <= MAXPRI_THROTTLE)) {
+            KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_BACKGROUND, thread->sched_pri, thread->priority, 0, 0);
+            return (THREAD_URGENCY_BACKGROUND);
+        }
+        else
+            KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_NORMAL, 0, 0, 0, 0);
+
+        return (THREAD_URGENCY_NORMAL);
+    }
+    else
+        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_NONE, 0, 0, 0, 0);
+
+    return (THREAD_URGENCY_NONE);
+}
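/*
 * [Editor's sketch -- illustrative only, not part of this patch.]
 * How a caller might consume thread_get_urgency() above -- for example,
 * platform power management deciding how deep an idle state or how
 * aggressive a frequency ramp to use for the thread about to run.  The
 * THREAD_URGENCY_* values and the function itself appear in this diff;
 * the caller below is entirely hypothetical.
 */
static void
example_dispatch_hook(void)
{
    uint64_t period, deadline;
    int urgency = thread_get_urgency(&period, &deadline);

    switch (urgency) {
    case THREAD_URGENCY_REAL_TIME:
        /* Constrain idle-state and frequency decisions by the
         * realtime (period, deadline) pair returned above. */
        break;
    case THREAD_URGENCY_BACKGROUND:
        /* Favor efficiency: deep idle states, low frequency. */
        break;
    case THREAD_URGENCY_NORMAL:
    case THREAD_URGENCY_NONE:
    default:
        /* No special constraint. */
        break;
    }
}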
+
+
 /*
  *    This is the processor idle loop, which just looks for other threads
  *    to execute.  Processor idle threads invoke this without supplying a
@@ -2769,7 +3894,14 @@ steal_thread(
  *
  *    Returns the next thread to execute if dispatched directly.
  */
-static thread_t
+
+#if 0
+#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
+#else
+#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
+#endif
+
+thread_t
 processor_idle(
     thread_t            thread,
     processor_t         processor)
@@ -2777,49 +3909,39 @@ processor_idle(
     processor_set_t     pset = processor->processor_set;
     thread_t            new_thread;
     int                 state;
-
     (void)splsched();
 
-#ifdef __ppc__
-    pmsDown();    /* Step power down */
-#endif
-
     KERNEL_DEBUG_CONSTANT(
-        MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START, (int)thread, 0, 0, 0, 0);
+        MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START, (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
+
+    SCHED_STATS_CPU_IDLE_START(processor);
 
     timer_switch(&PROCESSOR_DATA(processor, system_state),
                  mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
     PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
 
-    while (processor->next_thread == THREAD_NULL && processor->runq.count == 0 &&
+    while (processor->next_thread == THREAD_NULL && SCHED(processor_queue_empty)(processor) && rt_runq.count == 0 && SCHED(fairshare_runq_count)() == 0 &&
            (thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) {
-        volatile processor_t    hint;
+        IDLE_KERNEL_DEBUG_CONSTANT(
+            MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -1, 0);
 
         machine_idle();
 
         (void)splsched();
 
-        if (pset->low_hint == PROCESSOR_NULL)
-            break;
+        IDLE_KERNEL_DEBUG_CONSTANT(
+            MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -2, 0);
 
-        hint = pset->high_hint;
-        if (hint != PROCESSOR_NULL && hint->runq.count > 0)
+        if (processor->state == PROCESSOR_INACTIVE && !machine_processor_is_inactive(processor))
             break;
     }
 
-    KERNEL_DEBUG_CONSTANT(
-        MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, 0, 0, 0, 0);
-
     timer_switch(&PROCESSOR_DATA(processor, idle_state),
                  mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
     PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
 
     pset_lock(pset);
 
-#ifdef __ppc__
-    pmsStep(0);    /* Step up out of idle power */
-#endif
-
     state = processor->state;
     if (state == PROCESSOR_DISPATCHING) {
         /*
@@ -2829,30 +3951,41 @@ processor_idle(
         processor->next_thread = THREAD_NULL;
         processor->state = PROCESSOR_RUNNING;
 
-        if ( processor->runq.highq > new_thread->sched_pri ||
-             rt_runq.highq >= new_thread->sched_pri ) {
+        if (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) ||
+            (rt_runq.count > 0 && BASEPRI_RTQUEUES >= new_thread->sched_pri) ) {
             processor->deadline = UINT64_MAX;
 
             pset_unlock(pset);
 
             thread_lock(new_thread);
+            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq.count, 0, 0);
             thread_setrun(new_thread, SCHED_HEADQ);
             thread_unlock(new_thread);
 
+            KERNEL_DEBUG_CONSTANT(
+                MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, 0, 0, 0);
+
             return (THREAD_NULL);
         }
 
         pset_unlock(pset);
 
+        KERNEL_DEBUG_CONSTANT(
+            MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);
+
         return (new_thread);
     }
     else
     if (state == PROCESSOR_IDLE) {
-        remqueue(&pset->idle_queue, (queue_entry_t)processor);
-        pset->idle_count--;
+        remqueue((queue_entry_t)processor);
 
         processor->state = PROCESSOR_RUNNING;
-        enqueue_head(&pset->active_queue, (queue_entry_t)processor);
+        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+    }
+    else
+    if (state == PROCESSOR_INACTIVE) {
+        processor->state = PROCESSOR_RUNNING;
+        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
     }
     else
     if (state == PROCESSOR_SHUTDOWN) {
@@ -2870,15 +4003,26 @@ processor_idle(
             thread_setrun(new_thread, SCHED_HEADQ);
             thread_unlock(new_thread);
 
+            KERNEL_DEBUG_CONSTANT(
+                MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, 0, 0, 0);
+
             return (THREAD_NULL);
         }
     }
 
     pset_unlock(pset);
 
+    KERNEL_DEBUG_CONSTANT(
+        MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, 0, 0, 0);
+
     return (THREAD_NULL);
 }
 
+/*
+ *    Each processor has a dedicated thread which
+ *    executes the idle loop when there is no suitable
+ *    previous context.
+ */
 void
 idle_thread(void)
 {
@@ -2921,8 +4065,6 @@ idle_thread_create(
     return (KERN_SUCCESS);
 }
 
-static uint64_t        sched_tick_deadline;
-
 /*
  *    sched_startup:
  *
@@ -2936,14 +4078,16 @@ sched_startup(void)
     kern_return_t      result;
     thread_t           thread;
 
-    result = kernel_thread_start_priority((thread_continue_t)sched_tick_thread, NULL, MAXPRI_KERNEL, &thread);
+    result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
+        (void *)SCHED(maintenance_continuation),
+        MAXPRI_KERNEL, &thread);
     if (result != KERN_SUCCESS)
         panic("sched_startup");
 
     thread_deallocate(thread);
 
     /*
-     *    Yield to the sched_tick_thread while it times
+     *    Yield to the sched_init_thread while it times
      *    a series of context switches back.  It stores
      *    the baseline value in sched_cswtime.
      *
@@ -2952,20 +4096,20 @@ sched_startup(void)
      */
     while (sched_cswtime == 0)
         thread_block(THREAD_CONTINUE_NULL);
+}
 
-    thread_daemon_init();
+#if defined(CONFIG_SCHED_TRADITIONAL)
 
-    thread_call_initialize();
-}
+static uint64_t        sched_tick_deadline = 0;
 
 /*
- *    sched_tick_thread:
+ *    sched_init_thread:
  *
  *    Perform periodic bookkeeping functions about ten
  *    times per second.
  */
 static void
-sched_tick_continue(void)
+sched_traditional_tick_continue(void)
 {
     uint64_t           abstime = mach_absolute_time();
 
@@ -2982,17 +4126,44 @@ sched_tick_continue(void)
      */
     thread_update_scan();
 
-    if (pm_tick_callout != NULL)
-        (*pm_tick_callout)();
-
+    if (sched_tick_deadline == 0)
+        sched_tick_deadline = abstime;
+
     clock_deadline_for_periodic_event(sched_tick_interval, abstime,
                                                 &sched_tick_deadline);
 
-    assert_wait_deadline((event_t)sched_tick_thread, THREAD_UNINT, sched_tick_deadline);
-    thread_block((thread_continue_t)sched_tick_continue);
+    assert_wait_deadline((event_t)sched_traditional_tick_continue, THREAD_UNINT, sched_tick_deadline);
+    thread_block((thread_continue_t)sched_traditional_tick_continue);
     /*NOTREACHED*/
 }
 
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+static uint32_t
+time_individual_cswitch(void)
+{
+    uint32_t switches = 0;
+    uint64_t newtime, starttime;
+
+    /* Wait for absolute time to increase. */
+    starttime = mach_absolute_time();
+    do {
+        newtime = mach_absolute_time();
+    } while (newtime == starttime);
+
+    /* Measure one or more context switches until time increases again.
+     * This ensures we get non-zero timings even if absolute time
+     * increases very infrequently compared to CPU clock.
+     */
+    starttime = newtime;
+    do {
+        thread_block(THREAD_CONTINUE_NULL);
+        newtime = mach_absolute_time();
+        ++switches;
+    } while (newtime == starttime);
+
+    /* Round up. */
+    return (uint32_t) ((newtime - starttime + switches - 1) / switches);
+}
+
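/*
 * [Editor's sketch -- illustrative only, not part of this patch.]
 * The measurement idea behind time_individual_cswitch() above, in
 * userland terms: when the clock's granularity is coarse relative to the
 * event being timed, align to a tick boundary, repeat the event until
 * the clock advances, and divide -- rounding up so the result is never
 * zero.  clock() stands in for mach_absolute_time() and the dummy
 * event() for thread_block(); both are assumptions of the sketch.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static volatile uint64_t sink;

/* Coarse monotonic "tick" source. */
static uint64_t now(void)       { return (uint64_t)clock(); }
/* The operation being timed.   */
static void     event(void)     { sink++; }

static uint32_t
time_one_event(void)
{
    uint64_t start, end;
    uint32_t reps = 0;

    /* Align to a tick boundary so the run starts on a fresh tick. */
    start = now();
    do {
        end = now();
    } while (end == start);

    /* Repeat the event until the counter advances again. */
    start = end;
    do {
        event();
        reps++;
        end = now();
    } while (end == start);

    /* Ceiling division: (a + b - 1) / b rounds up. */
    return (uint32_t)((end - start + reps - 1) / reps);
}

int
main(void)
{
    printf("~%u ticks per event\n", time_one_event());
    return 0;
}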
 /*
  *    Time a series of context switches to determine
  *    a baseline.  Toss the high and low and return
@@ -3002,15 +4173,11 @@ static uint32_t
 time_cswitch(void)
 {
     uint32_t    new, hi, low, accum;
-    uint64_t    abstime;
-    int         i, tries = 7;
+    int         i, tries = 7, denom;
 
     accum = hi = low = 0;
     for (i = 0; i < tries; ++i) {
-        abstime = mach_absolute_time();
-        thread_block(THREAD_CONTINUE_NULL);
-
-        new = mach_absolute_time() - abstime;
+        new = time_individual_cswitch();
 
         if (i == 0)
             accum = hi = low = new;
@@ -3023,21 +4190,24 @@ time_cswitch(void)
             accum += new;
         }
     }
-
-    return ((accum - hi - low) / (2 * (tries - 2)));
+    /* Round up. */
+    denom = 2 * (tries - 2);
+    return (accum - hi - low + denom - 1) / denom;
 }
 
 void
-sched_tick_thread(void)
+sched_init_thread(void (*continuation)(void))
 {
     sched_cswtime = time_cswitch();
+    assert(sched_cswtime > 0);
 
-    sched_tick_deadline = mach_absolute_time();
+    continuation();
 
-    sched_tick_continue();
     /*NOTREACHED*/
 }
 
+#if defined(CONFIG_SCHED_TRADITIONAL)
+
 /*
  *    thread_update_scan / runq_scan:
  *
@@ -3077,7 +4247,7 @@ runq_scan(
     while (count > 0) {
         queue_iterate(q, thread, thread_t, links) {
             if ( thread->sched_stamp != sched_tick &&
-                (thread->sched_mode & TH_MODE_TIMESHARE) ) {
+                (thread->sched_mode == TH_MODE_TIMESHARE) ) {
                 if (thread_update_count == THREAD_UPDATE_SIZE)
                     return (TRUE);
 
@@ -3111,7 +4281,7 @@ thread_update_scan(void)
             s = splsched();
             pset_lock(pset);
 
-            restart_needed = runq_scan(&processor->runq);
+            restart_needed = runq_scan(runq_for_processor(processor));
 
             pset_unlock(pset);
             splx(s);
@@ -3140,9 +4310,10 @@ thread_update_scan(void)
                 s = splsched();
                 thread_lock(thread);
-                if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
-                     thread->sched_stamp != sched_tick )
-                    update_priority(thread);
+                if ( !(thread->state & (TH_WAIT)) ) {
+                    if (SCHED(can_update_priority)(thread))
+                        SCHED(update_priority)(thread);
+                }
                 thread_unlock(thread);
                 splx(s);
 
@@ -3150,20 +4321,115 @@ thread_update_scan(void)
         }
     } while (restart_needed);
 }
 
+#endif /* CONFIG_SCHED_TRADITIONAL */
+
+boolean_t
+thread_eager_preemption(thread_t thread)
+{
+    return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
+}
+
+void
+thread_set_eager_preempt(thread_t thread)
+{
+    spl_t x;
+    processor_t p;
+    ast_t ast = AST_NONE;
+
+    x = splsched();
+    p = current_processor();
+
+    thread_lock(thread);
+    thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
+
+    if (thread == current_thread()) {
+        thread_unlock(thread);
+
+        ast = csw_check(p);
+        if (ast != AST_NONE) {
+            (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
+        }
+    } else {
+        p = thread->last_processor;
+
+        if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
+            p->active_thread == thread) {
+            cause_ast_check(p);
+        }
+
+        thread_unlock(thread);
+    }
+
+    splx(x);
+}
+
+void
+thread_clear_eager_preempt(thread_t thread)
+{
+    spl_t x;
+
+    x = splsched();
+    thread_lock(thread);
+
+    thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
+
+    thread_unlock(thread);
+    splx(x);
+}
+
+/*
+ *    Scheduling statistics
+ */
+void
+sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
+{
+    struct processor_sched_statistics *stats;
+    boolean_t to_realtime = FALSE;
+
+    stats = &processor->processor_data.sched_stats;
+    stats->csw_count++;
+
+    if (otherpri >= BASEPRI_REALTIME) {
+        stats->rt_sched_count++;
+        to_realtime = TRUE;
+    }
+
+    if ((reasons & AST_PREEMPT) != 0) {
+        stats->preempt_count++;
+
+        if (selfpri >= BASEPRI_REALTIME) {
+            stats->preempted_rt_count++;
+        }
+
+        if (to_realtime) {
+            stats->preempted_by_rt_count++;
+        }
+    }
+}
+
+void
+sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
+{
+    uint64_t timestamp = mach_absolute_time();
+
+    stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
+    stats->last_change_timestamp = timestamp;
+}
+
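/*
 * [Editor's sketch -- illustrative only, not part of this patch.]
 * sched_stats_handle_runq_change() above maintains a time-weighted
 * integral of run-queue depth: on every depth change it adds
 * (now - last_change) * old_depth, so dividing count_sum by elapsed
 * time yields the average queue depth.  Worked numbers, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct runq_stats_ex {
    uint64_t count_sum;         /* integral of depth over time */
    uint64_t last_change;       /* timestamp of last depth change */
};

static void
depth_change(struct runq_stats_ex *s, uint64_t now, int old_depth)
{
    s->count_sum += (now - s->last_change) * (uint64_t)old_depth;
    s->last_change = now;
}

int
main(void)
{
    struct runq_stats_ex s = { 0, 0 };

    depth_change(&s, 100, 0);   /* empty for 100 ticks: adds 0    */
    depth_change(&s, 150, 2);   /* depth 2 for 50 ticks: adds 100 */
    depth_change(&s, 200, 1);   /* depth 1 for 50 ticks: adds 50  */

    /* Average depth over 200 ticks = 150 / 200 = 0.75. */
    printf("avg depth = %.2f\n", (double)s.count_sum / 200.0);
    return 0;
}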
 /*
- *    Just in case someone doesn't use the macro
+ *    For calls from assembly code
  */
-#undef thread_wakeup
+#undef    thread_wakeup
 void
 thread_wakeup(
-    event_t        x);
+    event_t            x);
 
 void
 thread_wakeup(
-    event_t        x)
+    event_t            x)
 {
-    thread_wakeup_with_result(x, THREAD_AWAKENED);
+    thread_wakeup_with_result(x, THREAD_AWAKENED);
 }
 
 boolean_t