/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_FREE_COPYRIGHT@
*/
#include <debug.h>
-#include <cpus.h>
-#include <mach_kdb.h>
-#include <simple_clock.h>
-#include <power_save.h>
-#include <task_swapper.h>
-#include <ddb/db_output.h>
+#include <mach/mach_types.h>
#include <mach/machine.h>
+#include <mach/policy.h>
+#include <mach/sync_policy.h>
+#include <mach/thread_act.h>
+
#include <machine/machine_routines.h>
#include <machine/sched_param.h>
-#include <kern/ast.h>
+#include <machine/machine_cpu.h>
+#include <machine/machlimits.h>
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+#include <machine/commpage.h>
+#endif
+
+#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
-#include <kern/etap_macros.h>
-#include <kern/lock.h>
+#include <kern/smp.h>
+#include <kern/debug.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
+#include <kern/sfi.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
-#include <kern/thread_swap.h>
+#include <kern/ledger.h>
+#include <kern/timer_queue.h>
+#include <kern/waitq.h>
+
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
-#include <mach/policy.h>
-#include <mach/sync_policy.h>
-#include <kern/sf.h>
-#include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
+
+#include <mach/sdt.h>
+
#include <sys/kdebug.h>
-#if TASK_SWAPPER
-#include <kern/task_swap.h>
-extern int task_swap_on;
-#endif /* TASK_SWAPPER */
+#include <kern/pms.h>
+
+#if defined(CONFIG_TELEMETRY) && defined(CONFIG_SCHED_TIMESHARE_CORE)
+#include <kern/telemetry.h>
+#endif
+
+struct rt_queue rt_runq;
+
+uintptr_t sched_thread_on_rt_queue = (uintptr_t)0xDEAFBEE0;
-extern int hz;
+/* Lock RT runq, must be done with interrupts disabled (under splsched()) */
+#if __SMP__
+decl_simple_lock_data(static,rt_lock);
+#define rt_lock_init() simple_lock_init(&rt_lock, 0)
+#define rt_lock_lock() simple_lock(&rt_lock)
+#define rt_lock_unlock() simple_unlock(&rt_lock)
+#else
+#define rt_lock_init() do { } while(0)
+#define rt_lock_lock() do { } while(0)
+#define rt_lock_unlock() do { } while(0)
+#endif
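+
+/*
+ * Sketch of the usage pattern (the same one the RT run queue code in
+ * this file follows); interrupts must already be disabled when the
+ * lock is taken:
+ *
+ *	spl_t s = splsched();
+ *	rt_lock_lock();
+ *	... examine or modify rt_runq ...
+ *	rt_lock_unlock();
+ *	splx(s);
+ */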
-#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
+#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
-#define NO_KERNEL_PREEMPT 0
-#define KERNEL_PREEMPT 1
-int kernel_preemption_mode = KERNEL_PREEMPT;
+#define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
+int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
+
+#define MAX_UNSAFE_QUANTA 800
+int max_unsafe_quanta = MAX_UNSAFE_QUANTA;
+
+#define MAX_POLL_QUANTA 2
+int max_poll_quanta = MAX_POLL_QUANTA;
+
+#define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
+int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
+
+uint64_t max_poll_computation;
+
+uint64_t max_unsafe_computation;
+uint64_t sched_safe_duration;
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
-int min_quantum;
-natural_t min_quantum_ms;
+uint32_t std_quantum;
+uint32_t min_std_quantum;
+uint32_t bg_quantum;
+
+uint32_t std_quantum_us;
+uint32_t bg_quantum_us;
+
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+uint32_t thread_depress_time;
+uint32_t default_timeshare_computation;
+uint32_t default_timeshare_constraint;
+
+uint32_t max_rt_quantum;
+uint32_t min_rt_quantum;
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
unsigned sched_tick;
+uint32_t sched_tick_interval;
+#if defined(CONFIG_TELEMETRY)
+uint32_t sched_telemetry_interval;
+#endif /* CONFIG_TELEMETRY */
+
+uint32_t sched_pri_shift = INT8_MAX;
+uint32_t sched_background_pri_shift = INT8_MAX;
+uint32_t sched_combined_fgbg_pri_shift = INT8_MAX;
+uint32_t sched_fixed_shift;
+uint32_t sched_use_combined_fgbg_decay = 0;
+
+uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
+
+/* Allow foreground to decay past default to resolve inversions */
+#define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
+int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
-#if SIMPLE_CLOCK
-int sched_usec;
-#endif /* SIMPLE_CLOCK */
+/* Defaults for timer deadline profiling */
+#define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT	2000000	/* Timers with deadlines <= 2ms */
+#define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT	5000000	/* Timers with deadlines <= 5ms */
+
+uint64_t timer_deadline_tracking_bin_1;
+uint64_t timer_deadline_tracking_bin_2;
+
+thread_t sched_maintenance_thread;
+
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+uint64_t sched_one_second_interval;
+
+uint32_t sched_run_count, sched_share_count, sched_background_count;
+uint32_t sched_load_average, sched_mach_factor;
/* Forwards */
-void thread_continue(thread_t);
-void wait_queues_init(void);
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
+static void load_shift_init(void);
+static void preempt_pri_init(void);
+
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+static thread_t thread_select(
+ thread_t thread,
+ processor_t processor,
+ ast_t reason);
-void set_pri(
- thread_t thread,
- int pri,
- int resched);
+#if CONFIG_SCHED_IDLE_IN_PLACE
+static thread_t thread_select_idle(
+ thread_t thread,
+ processor_t processor);
+#endif
+
+thread_t processor_idle(
+ thread_t thread,
+ processor_t processor);
-thread_t choose_pset_thread(
- processor_t myprocessor,
- processor_set_t pset);
+ast_t
+csw_check_locked( processor_t processor,
+ processor_set_t pset,
+ ast_t check_reason);
-thread_t choose_thread(
- processor_t myprocessor);
+static void processor_setrun(
+ processor_t processor,
+ thread_t thread,
+ integer_t options);
-int run_queue_enqueue(
- run_queue_t runq,
- thread_t thread,
- boolean_t tail);
+static void
+sched_realtime_init(void);
-void idle_thread_continue(void);
-void do_thread_scan(void);
+static void
+sched_realtime_timebase_init(void);
-void clear_wait_internal(
- thread_t thread,
- int result);
+static void
+sched_timer_deadline_tracking_init(void);
#if DEBUG
-void dump_run_queues(
- run_queue_t rq);
-void dump_run_queue_struct(
- run_queue_t rq);
-void dump_processor(
- processor_t p);
-void dump_processor_set(
- processor_set_t ps);
-
-void checkrq(
- run_queue_t rq,
- char *msg);
-
-void thread_check(
- thread_t thread,
- run_queue_t runq);
-#endif /*DEBUG*/
-
-boolean_t thread_runnable(
- thread_t thread);
+extern int debug_task;
+#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
+#else
+#define TLOG(a, fmt, args...) do {} while (0)
+#endif
-/*
- * State machine
- *
- * states are combinations of:
- * R running
- * W waiting (or on wait queue)
- * N non-interruptible
- * O swapped out
- * I being swapped in
- *
- * init action
- * assert_wait thread_block clear_wait swapout swapin
- *
- * R RW, RWN R; setrun - -
- * RN RWN RN; setrun - -
- *
- * RW W R -
- * RWN WN RN -
- *
- * W R; setrun WO
- * WN RN; setrun -
- *
- * RO - - R
- *
- */
+static processor_t
+thread_bind_internal(
+ thread_t thread,
+ processor_t processor);
+
+static void
+sched_vm_group_maintenance(void);
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+int8_t sched_load_shifts[NRQS];
+int sched_preempt_pri[NRQBM];
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+const struct sched_dispatch_table *sched_current_dispatch = NULL;
/*
- * Waiting protocols and implementation:
- *
- * Each thread may be waiting for exactly one event; this event
- * is set using assert_wait(). That thread may be awakened either
- * by performing a thread_wakeup_prim() on its event,
- * or by directly waking that thread up with clear_wait().
- *
- * The implementation of wait events uses a hash table. Each
- * bucket is queue of threads having the same hash function
- * value; the chain for the queue (linked list) is the run queue
- * field. [It is not possible to be waiting and runnable at the
- * same time.]
- *
- * Locks on both the thread and on the hash buckets govern the
- * wait event field and the queue chain field. Because wakeup
- * operations only have the event as an argument, the event hash
- * bucket must be locked before any thread.
+ * Statically allocate a buffer to hold the longest possible
+ * scheduler description string, as currently implemented.
+ * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
+ * to export to userspace via sysctl(3). If either version
+ * changes, update the other.
*
- * Scheduling operations may also occur at interrupt level; therefore,
- * interrupts below splsched() must be prevented when holding
- * thread or hash bucket locks.
- *
- * The wait event hash table declarations are as follows:
+ * Note that in addition to being an upper bound on the strings
+ * in the kernel, it's also an exact parameter to PE_get_default(),
+ * which interrogates the device tree on some platforms. That
+ * API requires the caller know the exact size of the device tree
+ * property, so we need both a legacy size (32) and the current size
+ * (48) to deal with old and new device trees. The device tree property
+ * is similarly padded to a fixed size so that the same kernel image
+ * can run on multiple devices with different schedulers configured
+ * in the device tree.
*/
+char sched_string[SCHED_STRING_MAX_LENGTH];
-#define NUMQUEUES 59
-
-struct wait_queue wait_queues[NUMQUEUES];
+uint32_t sched_debug_flags;
-#define wait_hash(event) \
- ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
+/* Global flag which indicates whether Background Stepper Context is enabled */
+static int cpu_throttle_enabled = 1;
void
sched_init(void)
+{
+ char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' };
+
+ /* Check for runtime selection of the scheduler algorithm */
+ if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) {
+ /* If no boot-args override, look in device tree */
+ if (!PE_get_default("kern.sched", sched_arg,
+ SCHED_STRING_MAX_LENGTH)) {
+ sched_arg[0] = '\0';
+ }
+ }
+
+
+ if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
+ /* No boot-args, check in device tree */
+ if (!PE_get_default("kern.sched_pri_decay_limit",
+ &sched_pri_decay_band_limit,
+ sizeof(sched_pri_decay_band_limit))) {
+ /* Allow decay all the way to normal limits */
+ sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
+ }
+ }
+
+ kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
+
+ if (strlen(sched_arg) > 0) {
+ if (0) {
+ /* Allow pattern below */
+#if defined(CONFIG_SCHED_TRADITIONAL)
+ } else if (0 == strcmp(sched_arg, sched_traditional_dispatch.sched_name)) {
+ sched_current_dispatch = &sched_traditional_dispatch;
+ } else if (0 == strcmp(sched_arg, sched_traditional_with_pset_runqueue_dispatch.sched_name)) {
+ sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
+#endif
+#if defined(CONFIG_SCHED_PROTO)
+ } else if (0 == strcmp(sched_arg, sched_proto_dispatch.sched_name)) {
+ sched_current_dispatch = &sched_proto_dispatch;
+#endif
+#if defined(CONFIG_SCHED_GRRR)
+ } else if (0 == strcmp(sched_arg, sched_grrr_dispatch.sched_name)) {
+ sched_current_dispatch = &sched_grrr_dispatch;
+#endif
+#if defined(CONFIG_SCHED_MULTIQ)
+ } else if (0 == strcmp(sched_arg, sched_multiq_dispatch.sched_name)) {
+ sched_current_dispatch = &sched_multiq_dispatch;
+ } else if (0 == strcmp(sched_arg, sched_dualq_dispatch.sched_name)) {
+ sched_current_dispatch = &sched_dualq_dispatch;
+#endif
+ } else {
+#if defined(CONFIG_SCHED_TRADITIONAL)
+ printf("Unrecognized scheduler algorithm: %s\n", sched_arg);
+ printf("Scheduler: Using instead: %s\n", sched_traditional_with_pset_runqueue_dispatch.sched_name);
+ sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
+#else
+ panic("Unrecognized scheduler algorithm: %s", sched_arg);
+#endif
+ }
+ kprintf("Scheduler: Runtime selection of %s\n", SCHED(sched_name));
+ } else {
+#if defined(CONFIG_SCHED_MULTIQ)
+ sched_current_dispatch = &sched_multiq_dispatch;
+#elif defined(CONFIG_SCHED_TRADITIONAL)
+ sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
+#elif defined(CONFIG_SCHED_PROTO)
+ sched_current_dispatch = &sched_proto_dispatch;
+#elif defined(CONFIG_SCHED_GRRR)
+ sched_current_dispatch = &sched_grrr_dispatch;
+#else
+#error No default scheduler implementation
+#endif
+ kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
+ }
+
+ strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
+
+ if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
+ kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
+ }
+
+ SCHED(init)();
+ sched_realtime_init();
+ ast_init();
+ sched_timer_deadline_tracking_init();
+
+ SCHED(pset_init)(&pset0);
+ SCHED(processor_init)(master_processor);
+}
+
+void
+sched_timebase_init(void)
+{
+ uint64_t abstime;
+
+ clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
+ sched_one_second_interval = abstime;
+
+ SCHED(timebase_init)();
+ sched_realtime_timebase_init();
+}
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
+void
+sched_timeshare_init(void)
{
/*
- * Calculate the minimum quantum
- * in ticks.
+ * Calculate the timeslicing quantum
+ * in us.
*/
if (default_preemption_rate < 1)
default_preemption_rate = DEFAULT_PREEMPTION_RATE;
- min_quantum = hz / default_preemption_rate;
+ std_quantum_us = (1000 * 1000) / default_preemption_rate;
- /*
- * Round up result (4/5) to an
- * integral number of ticks.
- */
- if (((hz * 10) / default_preemption_rate) - (min_quantum * 10) >= 5)
- min_quantum++;
- if (min_quantum < 1)
- min_quantum = 1;
+ printf("standard timeslicing quantum is %d us\n", std_quantum_us);
- min_quantum_ms = (1000 / hz) * min_quantum;
+ if (default_bg_preemption_rate < 1)
+ default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
+ bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
- printf("scheduling quantum is %d ms\n", min_quantum_ms);
+ printf("standard background quantum is %d us\n", bg_quantum_us);
- wait_queues_init();
- pset_sys_bootstrap(); /* initialize processor mgmt. */
- processor_action();
+ load_shift_init();
+ preempt_pri_init();
sched_tick = 0;
-#if SIMPLE_CLOCK
- sched_usec = 0;
-#endif /* SIMPLE_CLOCK */
- ast_init();
- sf_init();
}
void
-wait_queues_init(void)
+sched_timeshare_timebase_init(void)
{
- register int i;
+ uint64_t abstime;
+ uint32_t shift;
+
+ /* standard timeslicing quantum */
+ clock_interval_to_absolutetime_interval(
+ std_quantum_us, NSEC_PER_USEC, &abstime);
+ assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+ std_quantum = (uint32_t)abstime;
+
+ /* smallest remaining quantum (250 us) */
+ clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
+ assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+ min_std_quantum = (uint32_t)abstime;
+
+ /* quantum for background tasks */
+ clock_interval_to_absolutetime_interval(
+ bg_quantum_us, NSEC_PER_USEC, &abstime);
+ assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+ bg_quantum = (uint32_t)abstime;
+
+ /* scheduler tick interval */
+ clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
+ NSEC_PER_USEC, &abstime);
+ assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+ sched_tick_interval = (uint32_t)abstime;
- for (i = 0; i < NUMQUEUES; i++) {
- wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
- }
+ /*
+ * Compute conversion factor from usage to
+ * timesharing priorities with 5/8 ** n aging.
+ */
+ abstime = (abstime * 5) / 3;
+ for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
+ abstime >>= 1;
+ sched_fixed_shift = shift;
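+
+	/*
+	 * Worked example (a sketch, assuming a timebase where one abstime
+	 * unit equals 1 ns): the tick interval above is 125 ms, i.e.
+	 * 125000000 units; scaled by 5/3 this is ~208333333, and 23 right
+	 * shifts bring it to ~24 <= BASEPRI_DEFAULT (31), so
+	 * sched_fixed_shift would be 23.
+	 */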
+
+ max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
+ sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;
+
+ max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
+ thread_depress_time = 1 * std_quantum;
+ default_timeshare_computation = std_quantum / 2;
+ default_timeshare_constraint = std_quantum;
+
+#if defined(CONFIG_TELEMETRY)
+ /* interval for high frequency telemetry */
+ clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &abstime);
+ assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+ sched_telemetry_interval = (uint32_t)abstime;
+#endif
}
-/*
- * Thread timeout routine, called when timer expires.
- */
-void
-thread_timer_expire(
- timer_call_param_t p0,
- timer_call_param_t p1)
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+static void
+sched_realtime_init(void)
{
- thread_t thread = p0;
- spl_t s;
+ rt_lock_init();
+
+ rt_runq.count = 0;
+ queue_init(&rt_runq.queue);
+}
+
+static void
+sched_realtime_timebase_init(void)
+{
+ uint64_t abstime;
+
+	/* smallest rt computation (50 us) */
+ clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
+ assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+ min_rt_quantum = (uint32_t)abstime;
+
+ /* maximum rt computation (50 ms) */
+ clock_interval_to_absolutetime_interval(
+ 50, 1000*NSEC_PER_USEC, &abstime);
+ assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
+ max_rt_quantum = (uint32_t)abstime;
- s = splsched();
- wake_lock(thread);
- if ( thread->wait_timer_is_set &&
- !timer_call_is_delayed(&thread->wait_timer, NULL) ) {
- thread->wait_timer_active--;
- thread->wait_timer_is_set = FALSE;
- thread_lock(thread);
- if (thread->active)
- clear_wait_internal(thread, THREAD_TIMED_OUT);
- thread_unlock(thread);
- }
- else
- if (--thread->wait_timer_active == 0)
- thread_wakeup_one(&thread->wait_timer_active);
- wake_unlock(thread);
- splx(s);
}
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
/*
- * thread_set_timer:
- *
- * Set a timer for the current thread, if the thread
- * is ready to wait. Must be called between assert_wait()
- * and thread_block().
+ * Set up values for timeshare
+ * loading factors.
*/
-void
-thread_set_timer(
- natural_t interval,
- natural_t scale_factor)
+static void
+load_shift_init(void)
{
- thread_t thread = current_thread();
- AbsoluteTime deadline;
- spl_t s;
+ int8_t k, *p = sched_load_shifts;
+ uint32_t i, j;
- s = splsched();
- wake_lock(thread);
- thread_lock(thread);
- if ((thread->state & TH_WAIT) != 0) {
- clock_interval_to_deadline(interval, scale_factor, &deadline);
- timer_call_enter(&thread->wait_timer, deadline);
- assert(!thread->wait_timer_is_set);
- thread->wait_timer_active++;
- thread->wait_timer_is_set = TRUE;
+ uint32_t sched_decay_penalty = 1;
+
+ if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof (sched_decay_penalty))) {
+ kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
}
- thread_unlock(thread);
- wake_unlock(thread);
- splx(s);
+
+ if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof (sched_decay_usage_age_factor))) {
+ kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
+ }
+
+ if (PE_parse_boot_argn("sched_use_combined_fgbg_decay", &sched_use_combined_fgbg_decay, sizeof (sched_use_combined_fgbg_decay))) {
+ kprintf("Overriding schedule fg/bg decay calculation: %u\n", sched_use_combined_fgbg_decay);
+ }
+
+ if (sched_decay_penalty == 0) {
+ /*
+ * There is no penalty for timeshare threads for using too much
+ * CPU, so set all load shifts to INT8_MIN. Even under high load,
+ * sched_pri_shift will be >INT8_MAX, and there will be no
+ * penalty applied to threads (nor will sched_usage be updated per
+ * thread).
+ */
+ for (i = 0; i < NRQS; i++) {
+ sched_load_shifts[i] = INT8_MIN;
+ }
+
+ return;
+ }
+
+ *p++ = INT8_MIN; *p++ = 0;
+
+ /*
+ * For a given system load "i", the per-thread priority
+ * penalty per quantum of CPU usage is ~2^k priority
+ * levels. "sched_decay_penalty" can cause more
+ * array entries to be filled with smaller "k" values
+ */
+ for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
+ for (j <<= 1; (i < j) && (i < NRQS); ++i)
+ *p++ = k;
+ }
+}
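+
+/*
+ * With the default sched_decay_penalty of 1, the loop above makes
+ * sched_load_shifts[i] roughly log2(i): entries 2-3 hold 1, 4-7 hold 2,
+ * 8-15 hold 3, and so on, with entry 0 pinned to INT8_MIN and entry 1
+ * to 0.
+ */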
+
+static void
+preempt_pri_init(void)
+{
+ int i, *p = sched_preempt_pri;
+
+ for (i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i)
+ setbit(i, p);
+
+ for (i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
+ setbit(i, p);
}
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+/*
+ * Thread wait timer expiration.
+ */
void
-thread_set_timer_deadline(
- AbsoluteTime deadline)
+thread_timer_expire(
+ void *p0,
+ __unused void *p1)
{
- thread_t thread = current_thread();
+ thread_t thread = p0;
spl_t s;
s = splsched();
- wake_lock(thread);
thread_lock(thread);
- if ((thread->state & TH_WAIT) != 0) {
- timer_call_enter(&thread->wait_timer, deadline);
- assert(!thread->wait_timer_is_set);
- thread->wait_timer_active++;
- thread->wait_timer_is_set = TRUE;
+ if (--thread->wait_timer_active == 0) {
+ if (thread->wait_timer_is_set) {
+ thread->wait_timer_is_set = FALSE;
+ clear_wait_internal(thread, THREAD_TIMED_OUT);
+ }
}
thread_unlock(thread);
- wake_unlock(thread);
splx(s);
}
-void
-thread_cancel_timer(void)
+/*
+ * thread_unblock:
+ *
+ * Unblock thread on wake up.
+ *
+ * Returns TRUE if the thread should now be placed on the runqueue.
+ *
+ * Thread must be locked.
+ *
+ * Called at splsched().
+ */
+boolean_t
+thread_unblock(
+ thread_t thread,
+ wait_result_t wresult)
{
- thread_t thread = current_thread();
- spl_t s;
+ boolean_t ready_for_runq = FALSE;
+ thread_t cthread = current_thread();
+ uint32_t new_run_count;
- s = splsched();
- wake_lock(thread);
+ /*
+ * Set wait_result.
+ */
+ thread->wait_result = wresult;
+
+ /*
+ * Cancel pending wait timer.
+ */
if (thread->wait_timer_is_set) {
if (timer_call_cancel(&thread->wait_timer))
thread->wait_timer_active--;
thread->wait_timer_is_set = FALSE;
}
- wake_unlock(thread);
- splx(s);
-}
-/*
- * thread_depress_timeout:
- *
- * Timeout routine for priority depression.
- */
-void
-thread_depress_timeout(
- thread_call_param_t p0,
- thread_call_param_t p1)
-{
- thread_t thread = p0;
- sched_policy_t *policy;
- spl_t s;
+ /*
+ * Update scheduling state: not waiting,
+ * set running.
+ */
+ thread->state &= ~(TH_WAIT|TH_UNINT);
- s = splsched();
- thread_lock(thread);
- policy = policy_id_to_sched_policy(thread->policy);
- thread_unlock(thread);
- splx(s);
+ if (!(thread->state & TH_RUN)) {
+ thread->state |= TH_RUN;
+ thread->last_made_runnable_time = mach_approximate_time();
- if (policy != SCHED_POLICY_NULL)
- policy->sp_ops.sp_thread_depress_timeout(policy, thread);
+ ready_for_runq = TRUE;
- thread_deallocate(thread);
-}
+ (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
-/*
- * Set up thread timeout element when thread is created.
- */
-void
-thread_timer_setup(
- thread_t thread)
-{
- timer_call_setup(&thread->wait_timer, thread_timer_expire, thread);
- thread->wait_timer_is_set = FALSE;
- thread->wait_timer_active = 1;
- thread->ref_count++;
+ /*
+ * Update run counts.
+ */
+ new_run_count = sched_run_incr(thread);
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ sched_share_incr(thread);
- thread_call_setup(&thread->depress_timer, thread_depress_timeout, thread);
-}
+ if (thread->sched_flags & TH_SFLAG_THROTTLED)
+ sched_background_incr(thread);
+ }
+ } else {
+ /*
+ * Signal if idling on another processor.
+ */
+#if CONFIG_SCHED_IDLE_IN_PLACE
+ if (thread->state & TH_IDLE) {
+ processor_t processor = thread->last_processor;
-void
-thread_timer_terminate(void)
-{
- thread_t thread = current_thread();
- spl_t s;
+ if (processor != current_processor())
+ machine_signal_idle(processor);
+ }
+#else
+ assert((thread->state & TH_IDLE) == 0);
+#endif
- s = splsched();
- wake_lock(thread);
- if (thread->wait_timer_is_set) {
- if (timer_call_cancel(&thread->wait_timer))
- thread->wait_timer_active--;
- thread->wait_timer_is_set = FALSE;
+ new_run_count = sched_run_count; /* updated in thread_select_idle() */
}
- thread->wait_timer_active--;
- while (thread->wait_timer_active > 0) {
- assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT);
- wake_unlock(thread);
- splx(s);
+ /*
+ * Calculate deadline for real-time threads.
+ */
+ if (thread->sched_mode == TH_MODE_REALTIME) {
+ uint64_t ctime;
- thread_block((void (*)(void)) 0);
+ ctime = mach_absolute_time();
+ thread->realtime.deadline = thread->realtime.constraint + ctime;
+ }
- s = splsched();
- wake_lock(thread);
+ /*
+ * Clear old quantum, fail-safe computation, etc.
+ */
+ thread->quantum_remaining = 0;
+ thread->computation_metered = 0;
+ thread->reason = AST_NONE;
+
+ /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
+ * We also account for "double hop" thread signaling via
+ * the thread callout infrastructure.
+	 * DRK: consider removing the callout wakeup counters in the future;
+	 * they're present for verification at the moment.
+ */
+ boolean_t aticontext, pidle;
+ ml_get_power_state(&aticontext, &pidle);
+
+ if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
+ ledger_credit(thread->t_ledger, task_ledgers.interrupt_wakeups, 1);
+ DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
+
+ uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd);
+
+ if (ttd) {
+ if (ttd <= timer_deadline_tracking_bin_1)
+ thread->thread_timer_wakeups_bin_1++;
+ else
+ if (ttd <= timer_deadline_tracking_bin_2)
+ thread->thread_timer_wakeups_bin_2++;
+ }
+
+ if (pidle) {
+ ledger_credit(thread->t_ledger, task_ledgers.platform_idle_wakeups, 1);
+ }
+
+ } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
+ if (cthread->callout_woken_from_icontext) {
+ ledger_credit(thread->t_ledger, task_ledgers.interrupt_wakeups, 1);
+ thread->thread_callout_interrupt_wakeups++;
+ if (cthread->callout_woken_from_platform_idle) {
+ ledger_credit(thread->t_ledger, task_ledgers.platform_idle_wakeups, 1);
+ thread->thread_callout_platform_idle_wakeups++;
+ }
+
+ cthread->callout_woke_thread = TRUE;
+ }
+ }
+
+ if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
+ thread->callout_woken_from_icontext = aticontext;
+ thread->callout_woken_from_platform_idle = pidle;
+ thread->callout_woke_thread = FALSE;
}
- wake_unlock(thread);
- splx(s);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result, new_run_count, 0);
- thread_deallocate(thread);
+ DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
+
+ return (ready_for_runq);
}
/*
- * Routine: thread_go_locked
+ * Routine: thread_go
* Purpose:
- * Start a thread running.
+ * Unblock and dispatch thread.
* Conditions:
* thread lock held, IPC locks may be held.
* thread must have been pulled from wait queue under same lock hold.
+ * thread must have been waiting
+ * Returns:
+ * KERN_SUCCESS - Thread was set running
+ *
+ * TODO: This should return void
*/
-void
-thread_go_locked(
- thread_t thread,
- int result)
+kern_return_t
+thread_go(
+ thread_t thread,
+ wait_result_t wresult)
{
- int state;
- sched_policy_t *policy;
- sf_return_t sfr;
-
assert(thread->at_safe_point == FALSE);
- assert(thread->wait_event == NO_EVENT);
- assert(thread->wait_queue == WAIT_QUEUE_NULL);
-
- if (thread->state & TH_WAIT) {
-
- thread->state &= ~(TH_WAIT|TH_UNINT);
- if (!(thread->state & TH_RUN)) {
- thread->state |= TH_RUN;
-#if THREAD_SWAPPER
- if (thread->state & TH_SWAPPED_OUT)
- thread_swapin(thread->top_act, FALSE);
- else
-#endif /* THREAD_SWAPPER */
- {
- policy = &sched_policy[thread->policy];
- sfr = policy->sp_ops.sp_thread_unblock(policy, thread);
- assert(sfr == SF_SUCCESS);
- }
- }
- thread->wait_result = result;
- }
+ assert(thread->wait_event == NO_EVENT64);
+ assert(thread->waitq == NULL);
-
- /*
- * The next few lines are a major hack. Hopefully this will get us
- * around all of the scheduling framework hooha. We can't call
- * sp_thread_unblock yet because we could still be finishing up the
- * durn two stage block on another processor and thread_setrun
- * could be called by s_t_u and we'll really be messed up then.
- */
- /* Don't mess with this if we are still swapped out */
- if (!(thread->state & TH_SWAPPED_OUT))
- thread->sp_state = MK_SP_RUNNABLE;
-
+ assert(!(thread->state & (TH_TERMINATE|TH_TERMINATE2)));
+ assert(thread->state & TH_WAIT);
+
+
+ if (thread_unblock(thread, wresult))
+ thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
+
+ return (KERN_SUCCESS);
}
-void
+/*
+ * Routine: thread_mark_wait_locked
+ * Purpose:
+ * Mark a thread as waiting. If, given the circumstances,
+ * it doesn't want to wait (i.e. already aborted), then
+ * indicate that in the return value.
+ * Conditions:
+ * at splsched() and thread is locked.
+ */
+__private_extern__
+wait_result_t
thread_mark_wait_locked(
- thread_t thread,
- int interruptible)
+ thread_t thread,
+ wait_interrupt_t interruptible)
{
+ boolean_t at_safe_point;
assert(thread == current_thread());
+ assert(!(thread->state & (TH_WAIT|TH_IDLE|TH_UNINT|TH_TERMINATE2)));
- thread->wait_result = -1; /* JMM - Needed for non-assert kernel */
- thread->state |= (interruptible && thread->interruptible) ?
- TH_WAIT : (TH_WAIT | TH_UNINT);
- thread->at_safe_point = (interruptible == THREAD_ABORTSAFE) && (thread->interruptible);
- thread->sleep_stamp = sched_tick;
-}
+ /*
+ * The thread may have certain types of interrupts/aborts masked
+ * off. Even if the wait location says these types of interrupts
+ * are OK, we have to honor mask settings (outer-scoped code may
+ * not be able to handle aborts at the moment).
+ */
+ if (interruptible > (thread->options & TH_OPT_INTMASK))
+ interruptible = thread->options & TH_OPT_INTMASK;
+
+ at_safe_point = (interruptible == THREAD_ABORTSAFE);
+
+ if ( interruptible == THREAD_UNINT ||
+ !(thread->sched_flags & TH_SFLAG_ABORT) ||
+ (!at_safe_point &&
+ (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
+
+ if ( !(thread->state & TH_TERMINATE))
+ DTRACE_SCHED(sleep);
+ thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
+ thread->at_safe_point = at_safe_point;
+ return (thread->wait_result = THREAD_WAITING);
+ }
+ else
+ if (thread->sched_flags & TH_SFLAG_ABORTSAFELY)
+ thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
+ return (thread->wait_result = THREAD_INTERRUPTED);
+}
/*
- * Routine: assert_wait_timeout
+ * Routine: thread_interrupt_level
* Purpose:
- * Assert that the thread intends to block,
- * waiting for a timeout (no user known event).
+ * Set the maximum interruptible state for the
+ * current thread. The effective value of any
+ * interruptible flag passed into assert_wait
+ * will never exceed this.
+ *
+ * Useful for code that must not be interrupted,
+ * but which calls code that doesn't know that.
+ * Returns:
+ * The old interrupt level for the thread.
*/
-unsigned int assert_wait_timeout_event;
-
-void
-assert_wait_timeout(
- mach_msg_timeout_t msecs,
- int interruptible)
+__private_extern__
+wait_interrupt_t
+thread_interrupt_level(
+ wait_interrupt_t new_level)
{
- spl_t s;
+ thread_t thread = current_thread();
+ wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
- assert_wait((event_t)&assert_wait_timeout_event, interruptible);
- thread_set_timer(msecs, 1000*NSEC_PER_USEC);
+ thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
+
+ return result;
}
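+
+/*
+ * Example: the wait_interrupt_t values are ordered THREAD_UNINT <
+ * THREAD_INTERRUPTIBLE < THREAD_ABORTSAFE, so after a call to
+ * thread_interrupt_level(THREAD_UNINT), thread_mark_wait_locked()
+ * clamps any interruptible argument down to THREAD_UNINT until the
+ * previous level is restored.
+ */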
/*
 * Check to see if assert wait is possible, before actually asserting
 * wait.
 */
boolean_t
assert_wait_possible(void)
{
thread_t thread;
- extern unsigned int debug_mode;
#if DEBUG
if(debug_mode) return TRUE; /* Always succeed in debug mode */
#endif
thread = current_thread();
- return (thread == NULL || wait_queue_assert_possible(thread));
+ return (thread == NULL || waitq_wait_possible(thread));
}
/*
* Assert that the current thread is about to go to
* sleep until the specified event occurs.
*/
-void
+wait_result_t
assert_wait(
event_t event,
- int interruptible)
+ wait_interrupt_t interruptible)
+{
+ if (__improbable(event == NO_EVENT))
+ panic("%s() called with NO_EVENT", __func__);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
+ VM_KERNEL_UNSLIDE(event), 0, 0, 0, 0);
+
+ struct waitq *waitq;
+ waitq = global_eventq(event);
+ return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
+}
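+
+/*
+ * Typical calling idiom (the same one thread_stop() below uses):
+ * assert the wait, then block only if the wait was actually asserted:
+ *
+ *	wait_result_t wresult = assert_wait(event, THREAD_UNINT);
+ *	if (wresult == THREAD_WAITING)
+ *		wresult = thread_block(THREAD_CONTINUE_NULL);
+ */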
+
+wait_result_t
+assert_wait_timeout(
+ event_t event,
+ wait_interrupt_t interruptible,
+ uint32_t interval,
+ uint32_t scale_factor)
+{
+ thread_t thread = current_thread();
+ wait_result_t wresult;
+ uint64_t deadline;
+ spl_t s;
+
+ if (__improbable(event == NO_EVENT))
+ panic("%s() called with NO_EVENT", __func__);
+
+ struct waitq *waitq;
+ waitq = global_eventq(event);
+
+ s = splsched();
+ waitq_lock(waitq);
+ thread_lock(thread);
+
+ clock_interval_to_deadline(interval, scale_factor, &deadline);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
+ VM_KERNEL_UNSLIDE(event), interruptible, deadline, 0, 0);
+
+ wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
+ interruptible,
+ TIMEOUT_URGENCY_SYS_NORMAL,
+ deadline, TIMEOUT_NO_LEEWAY,
+ thread);
+
+ thread_unlock(thread);
+ waitq_unlock(waitq);
+ splx(s);
+ return wresult;
+}
+
+wait_result_t
+assert_wait_timeout_with_leeway(
+ event_t event,
+ wait_interrupt_t interruptible,
+ wait_timeout_urgency_t urgency,
+ uint32_t interval,
+ uint32_t leeway,
+ uint32_t scale_factor)
+{
+ thread_t thread = current_thread();
+ wait_result_t wresult;
+ uint64_t deadline;
+ uint64_t abstime;
+ uint64_t slop;
+ uint64_t now;
+ spl_t s;
+
+ if (__improbable(event == NO_EVENT))
+ panic("%s() called with NO_EVENT", __func__);
+
+ now = mach_absolute_time();
+ clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
+ deadline = now + abstime;
+
+ clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
+
+ struct waitq *waitq;
+ waitq = global_eventq(event);
+
+ s = splsched();
+ waitq_lock(waitq);
+ thread_lock(thread);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
+ VM_KERNEL_UNSLIDE(event), interruptible, deadline, 0, 0);
+
+ wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
+ interruptible,
+ urgency, deadline, slop,
+ thread);
+
+ thread_unlock(thread);
+ waitq_unlock(waitq);
+ splx(s);
+ return wresult;
+}
+
+wait_result_t
+assert_wait_deadline(
+ event_t event,
+ wait_interrupt_t interruptible,
+ uint64_t deadline)
+{
+ thread_t thread = current_thread();
+ wait_result_t wresult;
+ spl_t s;
+
+ if (__improbable(event == NO_EVENT))
+ panic("%s() called with NO_EVENT", __func__);
+
+ struct waitq *waitq;
+ waitq = global_eventq(event);
+
+ s = splsched();
+ waitq_lock(waitq);
+ thread_lock(thread);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
+ VM_KERNEL_UNSLIDE(event), interruptible, deadline, 0, 0);
+
+ wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
+ interruptible,
+ TIMEOUT_URGENCY_SYS_NORMAL, deadline,
+ TIMEOUT_NO_LEEWAY, thread);
+ thread_unlock(thread);
+ waitq_unlock(waitq);
+ splx(s);
+ return wresult;
+}
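+
+/*
+ * Sketch of use: callers typically derive the deadline from the
+ * current time plus an interval, e.g. (for some hypothetical event):
+ *
+ *	uint64_t deadline;
+ *	clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);
+ *	assert_wait_deadline(event, THREAD_UNINT, deadline);
+ */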
+
+wait_result_t
+assert_wait_deadline_with_leeway(
+ event_t event,
+ wait_interrupt_t interruptible,
+ wait_timeout_urgency_t urgency,
+ uint64_t deadline,
+ uint64_t leeway)
+{
+ thread_t thread = current_thread();
+ wait_result_t wresult;
+ spl_t s;
+
+ if (__improbable(event == NO_EVENT))
+ panic("%s() called with NO_EVENT", __func__);
+
+ struct waitq *waitq;
+ waitq = global_eventq(event);
+
+ s = splsched();
+ waitq_lock(waitq);
+ thread_lock(thread);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
+ VM_KERNEL_UNSLIDE(event), interruptible, deadline, 0, 0);
+
+ wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
+ interruptible,
+ urgency, deadline, leeway,
+ thread);
+
+ thread_unlock(thread);
+ waitq_unlock(waitq);
+ splx(s);
+ return wresult;
+}
+
+/*
+ * thread_isoncpu:
+ *
+ * Return TRUE if a thread is running on a processor such that an AST
+ * is needed to pull it out of userspace execution, or if executing in
+ * the kernel, bring to a context switch boundary that would cause
+ * thread state to be serialized in the thread PCB.
+ *
+ * Thread locked, returns the same way. While locked, fields
+ * like "state" cannot change. "runq" can change only from set to unset.
+ */
+static inline boolean_t
+thread_isoncpu(thread_t thread)
{
- register wait_queue_t wq;
- register int index;
+ /* Not running or runnable */
+ if (!(thread->state & TH_RUN))
+ return (FALSE);
- assert(event != NO_EVENT);
- assert(assert_wait_possible());
+ /* Waiting on a runqueue, not currently running */
+ /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
+ if (thread->runq != PROCESSOR_NULL)
+ return (FALSE);
+
+ /*
+ * Thread does not have a stack yet
+ * It could be on the stack alloc queue or preparing to be invoked
+ */
+ if (!thread->kernel_stack)
+ return (FALSE);
- index = wait_hash(event);
- wq = &wait_queues[index];
- wait_queue_assert_wait(wq,
- event,
- interruptible);
+ /*
+ * Thread must be running on a processor, or
+ * about to run, or just did run. In all these
+ * cases, an AST to the processor is needed
+ * to guarantee that the thread is kicked out
+ * of userspace and the processor has
+ * context switched (and saved register state).
+ */
+ return (TRUE);
}
-
/*
- * thread_[un]stop(thread)
- * Once a thread has blocked interruptibly (via assert_wait) prevent
- * it from running until thread_unstop.
+ * thread_stop:
*
- * If someone else has already stopped the thread, wait for the
- * stop to be cleared, and then stop it again.
+ * Force a preemption point for a thread and wait
+ * for it to stop running on a CPU. If a stronger
+ * guarantee is requested, wait until no longer
+ * runnable. Arbitrates access among
+ * multiple stop requests. (released by unstop)
*
- * Return FALSE if interrupted.
+ * The thread must enter a wait state and stop via a
+ * separate means.
*
- * NOTE: thread_hold/thread_suspend should be called on the activation
- * before calling thread_stop. TH_SUSP is only recognized when
- * a thread blocks and only prevents clear_wait/thread_wakeup
- * from restarting an interruptible wait. The wake_active flag is
- * used to indicate that someone is waiting on the thread.
+ * Returns FALSE if interrupted.
*/
boolean_t
thread_stop(
- thread_t thread)
+ thread_t thread,
+ boolean_t until_not_runnable)
{
- spl_t s;
+ wait_result_t wresult;
+ spl_t s = splsched();
+ boolean_t oncpu;
- s = splsched();
wake_lock(thread);
+ thread_lock(thread);
while (thread->state & TH_SUSP) {
thread->wake_active = TRUE;
- assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);
+ thread_unlock(thread);
+
+ wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
wake_unlock(thread);
splx(s);
- thread_block((void (*)(void)) 0);
- if (current_thread()->wait_result != THREAD_AWAKENED)
+ if (wresult == THREAD_WAITING)
+ wresult = thread_block(THREAD_CONTINUE_NULL);
+
+ if (wresult != THREAD_AWAKENED)
return (FALSE);
s = splsched();
wake_lock(thread);
+ thread_lock(thread);
}
- thread_lock(thread);
+
thread->state |= TH_SUSP;
- thread_unlock(thread);
+ while ((oncpu = thread_isoncpu(thread)) ||
+ (until_not_runnable && (thread->state & TH_RUN))) {
+ processor_t processor;
+
+ if (oncpu) {
+ assert(thread->state & TH_RUN);
+ processor = thread->chosen_processor;
+ cause_ast_check(processor);
+ }
+
+ thread->wake_active = TRUE;
+ thread_unlock(thread);
+
+ wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
+ wake_unlock(thread);
+ splx(s);
+
+ if (wresult == THREAD_WAITING)
+ wresult = thread_block(THREAD_CONTINUE_NULL);
+
+ if (wresult != THREAD_AWAKENED) {
+ thread_unstop(thread);
+ return (FALSE);
+ }
+
+ s = splsched();
+ wake_lock(thread);
+ thread_lock(thread);
+ }
+
+ thread_unlock(thread);
wake_unlock(thread);
splx(s);
+
+ /*
+ * We return with the thread unlocked. To prevent it from
+ * transitioning to a runnable state (or from TH_RUN to
+ * being on the CPU), the caller must ensure the thread
+ * is stopped via an external means (such as an AST)
+ */
return (TRUE);
}
/*
- * Clear TH_SUSP and if the thread has been stopped and is now runnable,
- * put it back on the run queue.
+ * thread_unstop:
+ *
+ * Release a previous stop request and set
+ * the thread running if appropriate.
+ *
+ * Use only after a successful stop operation.
*/
void
thread_unstop(
- thread_t thread)
+ thread_t thread)
{
- sched_policy_t *policy;
- sf_return_t sfr;
- spl_t s;
+ spl_t s = splsched();
- s = splsched();
wake_lock(thread);
thread_lock(thread);
- if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP/*|TH_UNINT*/)) == TH_SUSP) {
- thread->state = (thread->state & ~TH_SUSP) | TH_RUN;
-#if THREAD_SWAPPER
- if (thread->state & TH_SWAPPED_OUT)
- thread_swapin(thread->top_act, FALSE);
- else
-#endif /* THREAD_SWAPPER */
- {
- policy = &sched_policy[thread->policy];
- sfr = policy->sp_ops.sp_thread_unblock(policy, thread);
- assert(sfr == SF_SUCCESS);
- }
- }
- else
+ assert((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) != TH_SUSP);
+
if (thread->state & TH_SUSP) {
thread->state &= ~TH_SUSP;
if (thread->wake_active) {
thread->wake_active = FALSE;
thread_unlock(thread);
+
+ thread_wakeup(&thread->wake_active);
wake_unlock(thread);
splx(s);
- thread_wakeup((event_t)&thread->wake_active);
return;
}
}
/*
- * Wait for the thread's RUN bit to clear
+ * thread_wait:
+ *
+ * Wait for a thread to stop running. (non-interruptible)
+ *
*/
-boolean_t
+void
thread_wait(
- thread_t thread)
+ thread_t thread,
+ boolean_t until_not_runnable)
{
- spl_t s;
+ wait_result_t wresult;
+ boolean_t oncpu;
+ processor_t processor;
+ spl_t s = splsched();
- s = splsched();
wake_lock(thread);
+ thread_lock(thread);
+
+ /*
+ * Wait until not running on a CPU. If stronger requirement
+ * desired, wait until not runnable. Assumption: if thread is
+ * on CPU, then TH_RUN is set, so we're not waiting in any case
+ * where the original, pure "TH_RUN" check would have let us
+ * finish.
+ */
+ while ((oncpu = thread_isoncpu(thread)) ||
+ (until_not_runnable && (thread->state & TH_RUN))) {
- while (thread->state & (TH_RUN/*|TH_UNINT*/)) {
- if (thread->last_processor != PROCESSOR_NULL)
- cause_ast_check(thread->last_processor);
+ if (oncpu) {
+ assert(thread->state & TH_RUN);
+ processor = thread->chosen_processor;
+ cause_ast_check(processor);
+ }
thread->wake_active = TRUE;
- assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);
+ thread_unlock(thread);
+
+ wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
wake_unlock(thread);
splx(s);
- thread_block((void (*)(void))0);
- if (current_thread()->wait_result != THREAD_AWAKENED)
- return (FALSE);
+ if (wresult == THREAD_WAITING)
+ thread_block(THREAD_CONTINUE_NULL);
s = splsched();
wake_lock(thread);
+ thread_lock(thread);
}
+ thread_unlock(thread);
wake_unlock(thread);
splx(s);
-
- return (TRUE);
-}
-
-
-/*
- * thread_stop_wait(thread)
- * Stop the thread then wait for it to block interruptibly
- */
-boolean_t
-thread_stop_wait(
- thread_t thread)
-{
- if (thread_stop(thread)) {
- if (thread_wait(thread))
- return (TRUE);
-
- thread_unstop(thread);
- }
-
- return (FALSE);
}
-
/*
* Routine: clear_wait_internal
*
* Conditions:
* At splsched
* the thread is locked.
+ * Returns:
+ * KERN_SUCCESS thread was rousted out a wait
+ * KERN_FAILURE thread was waiting but could not be rousted
+ * KERN_NOT_WAITING thread was not waiting
*/
-void
+__private_extern__ kern_return_t
clear_wait_internal(
- thread_t thread,
- int result)
+ thread_t thread,
+ wait_result_t wresult)
{
- /*
- * If the thread isn't in a wait queue, just set it running. Otherwise,
- * try to remove it from the queue and, if successful, then set it
- * running. NEVER interrupt an uninterruptible thread.
- */
- if (!((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT))) {
- if (wait_queue_assert_possible(thread) ||
- (wait_queue_remove(thread) == KERN_SUCCESS)) {
- thread_go_locked(thread, result);
+ uint32_t i = LockTimeOut;
+ struct waitq *waitq = thread->waitq;
+
+ do {
+ if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
+ return (KERN_FAILURE);
+
+ if (waitq != NULL) {
+ assert(waitq_irq_safe(waitq)); //irqs are already disabled!
+ if (waitq_lock_try(waitq)) {
+ waitq_pull_thread_locked(waitq, thread);
+ waitq_unlock(waitq);
+ } else {
+ thread_unlock(thread);
+ delay(1);
+ thread_lock(thread);
+ if (waitq != thread->waitq)
+ return KERN_NOT_WAITING;
+ continue;
+ }
}
- }
+
+ /* TODO: Can we instead assert TH_TERMINATE is not set? */
+ if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT)
+ return (thread_go(thread, wresult));
+ else
+ return (KERN_NOT_WAITING);
+ } while ((--i > 0) || machine_timeout_suspended());
+
+ panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
+ thread, waitq, cpu_number());
+
+ return (KERN_FAILURE);
}
/*
 * clear_wait:
 *
 * Clear the wait condition for the specified thread.
 * Start the thread executing if that is appropriate.
 *
 *	thread		thread to awaken
* result Wakeup result the thread should see
*/
-void
+kern_return_t
clear_wait(
- thread_t thread,
- int result)
+ thread_t thread,
+ wait_result_t result)
{
+ kern_return_t ret;
spl_t s;
s = splsched();
thread_lock(thread);
- clear_wait_internal(thread, result);
+ ret = clear_wait_internal(thread, result);
thread_unlock(thread);
splx(s);
+ return ret;
}
/*
 * thread_wakeup_prim:
 *
 * Common routine for thread_wakeup, thread_wakeup_with_result,
 * and thread_wakeup_one.
*
*/
-void
+kern_return_t
thread_wakeup_prim(
event_t event,
boolean_t one_thread,
- int result)
+ wait_result_t result)
+{
+ return (thread_wakeup_prim_internal(event, one_thread, result, -1));
+}
+
+
+kern_return_t
+thread_wakeup_prim_internal(
+ event_t event,
+ boolean_t one_thread,
+ wait_result_t result,
+ int priority)
{
- register wait_queue_t wq;
- register int index;
+ if (__improbable(event == NO_EVENT))
+ panic("%s() called with NO_EVENT", __func__);
+
+ struct waitq *wq;
+
+ wq = global_eventq(event);
+ priority = (priority == -1 ? WAITQ_ALL_PRIORITIES : priority);
- index = wait_hash(event);
- wq = &wait_queues[index];
if (one_thread)
- wait_queue_wakeup_one(wq, event, result);
+ return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, priority);
else
- wait_queue_wakeup_all(wq, event, result);
+ return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, priority);
}
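+
+/*
+ * Note: thread_wakeup(), thread_wakeup_with_result() and
+ * thread_wakeup_one() are macros over thread_wakeup_prim() (declared
+ * in kern/sched_prim.h), passing one_thread FALSE/TRUE and either
+ * THREAD_AWAKENED or the supplied wait result.
+ */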
/*
* thread_bind:
*
- * Force a thread to execute on the specified processor.
- * If the thread is currently executing, it may wait until its
- * time slice is up before switching onto the specified processor.
+ * Force the current thread to execute on the specified processor.
+ * Takes effect after the next thread_block().
*
- * A processor of PROCESSOR_NULL causes the thread to be unbound.
- * xxx - DO NOT export this to users.
+ * Returns the previous binding. PROCESSOR_NULL means
+ * not bound.
+ *
+ * XXX - DO NOT export this to users - XXX
*/
-void
+processor_t
thread_bind(
- register thread_t thread,
- processor_t processor)
+ processor_t processor)
{
- spl_t s;
+ thread_t self = current_thread();
+ processor_t prev;
+ spl_t s;
s = splsched();
- thread_lock(thread);
- thread_bind_locked(thread, processor);
- thread_unlock(thread);
+ thread_lock(self);
+
+ prev = thread_bind_internal(self, processor);
+
+ thread_unlock(self);
splx(s);
+
+ return (prev);
}
/*
- * Select a thread for this processor (the current processor) to run.
- * May select the current thread, which must already be locked.
+ * thread_bind_internal:
+ *
+ * If the specified thread is not the current thread, and it is currently
+ * running on another CPU, a remote AST must be sent to that CPU to cause
+ * the thread to migrate to its bound processor. Otherwise, the migration
+ * will occur at the next quantum expiration or blocking point.
+ *
+ * When the thread is the current thread, an explicit thread_block() should
+ * be used to force the current processor to context switch away and
+ * let the thread migrate to the bound processor.
+ *
+ * Thread must be locked, and at splsched.
*/
-thread_t
-thread_select(
- register processor_t myprocessor)
+
+static processor_t
+thread_bind_internal(
+ thread_t thread,
+ processor_t processor)
{
- register thread_t thread;
- processor_set_t pset;
- register run_queue_t runq = &myprocessor->runq;
- boolean_t other_runnable;
- sched_policy_t *policy;
+ processor_t prev;
- /*
- * Check for other non-idle runnable threads.
- */
- myprocessor->first_quantum = TRUE;
- pset = myprocessor->processor_set;
- thread = current_thread();
+ /* <rdar://problem/15102234> */
+ assert(thread->sched_pri < BASEPRI_RTQUEUES);
+ /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
+ assert(thread->runq == PROCESSOR_NULL);
-#if 0 /* CHECKME! */
- thread->unconsumed_quantum = myprocessor->quantum;
-#endif
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
- simple_lock(&runq->lock);
- simple_lock(&pset->runq.lock);
+ prev = thread->bound_processor;
+ thread->bound_processor = processor;
- other_runnable = runq->count > 0 || pset->runq.count > 0;
+ return (prev);
+}
- if ( thread->state == TH_RUN &&
- (!other_runnable ||
- (runq->highq < thread->sched_pri &&
- pset->runq.highq < thread->sched_pri)) &&
- thread->processor_set == pset &&
- (thread->bound_processor == PROCESSOR_NULL ||
- thread->bound_processor == myprocessor) ) {
+/*
+ * thread_vm_bind_group_add:
+ *
+ * The "VM bind group" is a special mechanism to mark a collection
+ * of threads from the VM subsystem that, in general, should be scheduled
+ * with only one CPU of parallelism. To accomplish this, we initially
+ * bind all the threads to the master processor, which has the effect
+ * that only one of the threads in the group can execute at once, including
+ * preempting threads in the group that are a lower priority. Future
+ * mechanisms may use more dynamic mechanisms to prevent the collection
+ * of VM threads from using more CPU time than desired.
+ *
+ * The current implementation can result in priority inversions where
+ * compute-bound priority 95 or realtime threads that happen to have
+ * landed on the master processor prevent the VM threads from running.
+ * When this situation is detected, we unbind the threads for one
+ * scheduler tick to allow the scheduler to run the threads an
+ * additional CPUs, before restoring the binding (assuming high latency
+ * is no longer a problem).
+ */
- /* I am the highest priority runnable (non-idle) thread */
- simple_unlock(&pset->runq.lock);
- simple_unlock(&runq->lock);
+/*
+ * The current max is provisioned for:
+ * vm_compressor_swap_trigger_thread (92)
+ * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
+ * vm_pageout_continue (92)
+ * memorystatus_thread (95)
+ */
+#define MAX_VM_BIND_GROUP_COUNT (5)
+decl_simple_lock_data(static,sched_vm_group_list_lock);
+static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
+static int sched_vm_group_thread_count;
+static boolean_t sched_vm_group_temporarily_unbound = FALSE;
- /* Update the thread's meta-priority */
- policy = policy_id_to_sched_policy(thread->policy);
- assert(policy != SCHED_POLICY_NULL);
- (void)policy->sp_ops.sp_thread_update_mpri(policy, thread);
- }
- else
- if (other_runnable) {
- simple_unlock(&pset->runq.lock);
- simple_unlock(&runq->lock);
- thread = choose_thread(myprocessor);
- }
- else {
- simple_unlock(&pset->runq.lock);
- simple_unlock(&runq->lock);
+void
+thread_vm_bind_group_add(void)
+{
+ thread_t self = current_thread();
- /*
- * Nothing is runnable, so set this processor idle if it
- * was running. If it was in an assignment or shutdown,
- * leave it alone. Return its idle thread.
- */
- simple_lock(&pset->idle_lock);
- if (myprocessor->state == PROCESSOR_RUNNING) {
- myprocessor->state = PROCESSOR_IDLE;
- /*
- * XXX Until it goes away, put master on end of queue, others
- * XXX on front so master gets used last.
- */
- if (myprocessor == master_processor)
- queue_enter(&(pset->idle_queue), myprocessor,
- processor_t, processor_queue);
- else
- queue_enter_first(&(pset->idle_queue), myprocessor,
- processor_t, processor_queue);
+ thread_reference_internal(self);
+ self->options |= TH_OPT_SCHED_VM_GROUP;
- pset->idle_count++;
- }
- simple_unlock(&pset->idle_lock);
+ simple_lock(&sched_vm_group_list_lock);
+ assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
+ sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
+ simple_unlock(&sched_vm_group_list_lock);
- thread = myprocessor->idle_thread;
- }
+ thread_bind(master_processor);
+
+ /* Switch to bound processor if not already there */
+ thread_block(THREAD_CONTINUE_NULL);
+}
+
+static void
+sched_vm_group_maintenance(void)
+{
+ uint64_t ctime = mach_absolute_time();
+ uint64_t longtime = ctime - sched_tick_interval;
+ int i;
+ spl_t s;
+ boolean_t high_latency_observed = FALSE;
+ boolean_t runnable_and_not_on_runq_observed = FALSE;
+ boolean_t bind_target_changed = FALSE;
+ processor_t bind_target = PROCESSOR_NULL;
+
+ /* Make sure nobody attempts to add new threads while we are enumerating them */
+ simple_lock(&sched_vm_group_list_lock);
+
+ s = splsched();
+
+ for (i=0; i < sched_vm_group_thread_count; i++) {
+ thread_t thread = sched_vm_group_thread_list[i];
+ assert(thread != THREAD_NULL);
+ thread_lock(thread);
+ if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) {
+ if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
+ high_latency_observed = TRUE;
+ } else if (thread->runq == PROCESSOR_NULL) {
+				/* There are some cases where a thread in transition also falls into this case */
+ runnable_and_not_on_runq_observed = TRUE;
+ }
+ }
+ thread_unlock(thread);
+
+ if (high_latency_observed && runnable_and_not_on_runq_observed) {
+ /* All the things we are looking for are true, stop looking */
+ break;
+ }
+ }
+
+ splx(s);
+
+ if (sched_vm_group_temporarily_unbound) {
+ /* If we turned off binding, make sure everything is OK before rebinding */
+ if (!high_latency_observed) {
+ /* rebind */
+ bind_target_changed = TRUE;
+ bind_target = master_processor;
+ sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
+ }
+ } else {
+ /*
+ * Check if we're in a bad state, which is defined by high
+ * latency with no core currently executing a thread. If a
+ * single thread is making progress on a CPU, that means the
+ * binding concept to reduce parallelism is working as
+ * designed.
+ */
+ if (high_latency_observed && !runnable_and_not_on_runq_observed) {
+ /* unbind */
+ bind_target_changed = TRUE;
+ bind_target = PROCESSOR_NULL;
+ sched_vm_group_temporarily_unbound = TRUE;
+ }
+ }
+
+ if (bind_target_changed) {
+ s = splsched();
+ for (i=0; i < sched_vm_group_thread_count; i++) {
+ thread_t thread = sched_vm_group_thread_list[i];
+ boolean_t removed;
+ assert(thread != THREAD_NULL);
+
+ thread_lock(thread);
+ removed = thread_run_queue_remove(thread);
+ if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
+ thread_bind_internal(thread, bind_target);
+ } else {
+ /*
+ * Thread was in the middle of being context-switched-to,
+ * or was in the process of blocking. To avoid switching the bind
+ * state out mid-flight, defer the change if possible.
+ */
+ if (bind_target == PROCESSOR_NULL) {
+ thread_bind_internal(thread, bind_target);
+ } else {
+ sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
+ }
+ }
+
+ if (removed) {
+ thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
+ }
+ thread_unlock(thread);
+ }
+ splx(s);
+ }
+
+ simple_unlock(&sched_vm_group_list_lock);
+}
+
+/* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
+ * rebalancing opportunity exists when a core is (instantaneously) idle, but
+ * other SMT-capable cores may be over-committed.  TODO: some possible negatives:
+ *  - IPI thrash if this core does not remain idle following the load balancing ASTs
+ *  - Idle "thrash", when an IPI issue is followed by idle entry/core power down,
+ *    followed by a wakeup shortly thereafter.
+ */
+
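+/*
+ * Illustrative scenario (not taken from the code): with two 2-way SMT cores,
+ *
+ *	core 0: primary RUNNING, secondary RUNNING	(over-committed)
+ *	core 1: primary IDLE,    secondary IDLE		(this core, entering idle)
+ *
+ * sched_SMT_balance() on core 1 would select core 0's secondary as
+ * ast_processor and IPI it, allowing one of core 0's threads to migrate to
+ * the fully idle physical core.
+ */
+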
+#if (DEVELOPMENT || DEBUG)
+int sched_smt_balance = 1;
+#endif
+
+#if __SMP__
+/* Invoked with pset locked, returns with pset unlocked */
+static void
+sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
+{
+ processor_t ast_processor = NULL;
+
+#if (DEVELOPMENT || DEBUG)
+ if (__improbable(sched_smt_balance == 0))
+ goto smt_balance_exit;
+#endif
+
+ assert(cprocessor == current_processor());
+ if (cprocessor->is_SMT == FALSE)
+ goto smt_balance_exit;
+
+ processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
+
+ /* Determine if both this processor and its sibling are idle,
+ * indicating an SMT rebalancing opportunity.
+ */
+ if (sib_processor->state != PROCESSOR_IDLE)
+ goto smt_balance_exit;
+
+ processor_t sprocessor;
+
+ sprocessor = (processor_t)queue_first(&cpset->active_queue);
+
+ while (!queue_end(&cpset->active_queue, (queue_entry_t)sprocessor)) {
+ if ((sprocessor->state == PROCESSOR_RUNNING) &&
+ (sprocessor->processor_primary != sprocessor) &&
+ (sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
+ (sprocessor->current_pri < BASEPRI_RTQUEUES) &&
+ ((cpset->pending_AST_cpu_mask & (1ULL << sprocessor->cpu_id)) == 0)) {
+ assert(sprocessor != cprocessor);
+ ast_processor = sprocessor;
+ break;
+ }
+ sprocessor = (processor_t)queue_next((queue_entry_t)sprocessor);
+ }
+
+smt_balance_exit:
+ pset_unlock(cpset);
+
+ if (ast_processor) {
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
+ cause_ast_check(ast_processor);
+ }
+}
+#endif /* __SMP__ */
+
+/*
+ * thread_select:
+ *
+ * Select a new thread for the current processor to execute.
+ *
+ * May select the current thread, which must be locked.
+ */
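+/*
+ * Selection order, summarized from the body below: (1) keep the current
+ * thread if it remains the best candidate, (2) otherwise take the earliest
+ * deadline thread from the realtime run queue, (3) otherwise the highest
+ * priority thread from the regular run queue, (4) otherwise attempt to
+ * steal from another processor, and (5) failing all of those, go idle.
+ */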
+static thread_t
+thread_select(
+ thread_t thread,
+ processor_t processor,
+ ast_t reason)
+{
+ processor_set_t pset = processor->processor_set;
+ thread_t new_thread = THREAD_NULL;
+
+ assert(processor == current_processor());
+ assert((thread->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);
+
+ do {
+ /*
+ * Update the priority.
+ */
+ if (SCHED(can_update_priority)(thread))
+ SCHED(update_priority)(thread);
+
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+
+ pset_lock(pset);
+
+ assert(processor->state != PROCESSOR_OFF_LINE);
+
+ if (!processor->is_recommended) {
+ /*
+ * The performance controller has provided a hint to not dispatch more threads,
+			 * unless they are bound to us (and thus we are the only option).
+ */
+ if (!SCHED(processor_bound_count)(processor)) {
+ goto idle;
+ }
+ } else if (processor->processor_primary != processor) {
+ /*
+ * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
+ * we should look for work only under the same conditions that choose_processor()
+ * would have assigned work, which is when all primary processors have been assigned work.
+ *
+ * An exception is that bound threads are dispatched to a processor without going through
+ * choose_processor(), so in those cases we should continue trying to dequeue work.
+ */
+ if (!SCHED(processor_bound_count)(processor) && !queue_empty(&pset->idle_queue) && !rt_runq.count) {
+ goto idle;
+ }
+ }
+
+ rt_lock_lock();
+
+ /*
+ * Test to see if the current thread should continue
+	 * to run on this processor.  It must not be attempting to wait,
+	 * must not be bound to a different processor, must not be in the
+	 * wrong processor set, and must not be forced to context switch
+	 * by TH_SUSP.
+ *
+ * Note that there are never any RT threads in the regular runqueue.
+ *
+	 * This code is exceptionally tricky.
+ */
+
+ if (((thread->state & (TH_TERMINATE|TH_IDLE|TH_WAIT|TH_RUN|TH_SUSP)) == TH_RUN) &&
+ (thread->sched_pri >= BASEPRI_RTQUEUES || processor->processor_primary == processor) &&
+ (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor) &&
+ (thread->affinity_set == AFFINITY_SET_NULL || thread->affinity_set->aset_pset == pset)) {
+ /*
+ * RT threads with un-expired quantum stay on processor,
+ * unless there's a valid RT thread with an earlier deadline.
+ */
+ if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
+ if (rt_runq.count > 0) {
+ thread_t next_rt;
+
+ next_rt = (thread_t)queue_first(&rt_runq.queue);
+
+ assert(next_rt->runq == THREAD_ON_RT_RUNQ);
+
+ if (next_rt->realtime.deadline < processor->deadline &&
+ (next_rt->bound_processor == PROCESSOR_NULL ||
+ next_rt->bound_processor == processor)) {
+ /* The next RT thread is better, so pick it off the runqueue. */
+ goto pick_new_rt_thread;
+ }
+ }
+
+ /* This is still the best RT thread to run. */
+ processor->deadline = thread->realtime.deadline;
+
+ rt_lock_unlock();
+ pset_unlock(pset);
+
+ return (thread);
+ }
+
+ if ((rt_runq.count == 0) &&
+ SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
+ /* This thread is still the highest priority runnable (non-idle) thread */
+ processor->deadline = UINT64_MAX;
+
+ rt_lock_unlock();
+ pset_unlock(pset);
+
+ return (thread);
+ }
+ }
+
+ /* OK, so we're not going to run the current thread. Look at the RT queue. */
+ if (rt_runq.count > 0) {
+ thread_t next_rt = (thread_t)queue_first(&rt_runq.queue);
+
+ assert(next_rt->runq == THREAD_ON_RT_RUNQ);
+
+ if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
+ (next_rt->bound_processor == processor)))) {
+pick_new_rt_thread:
+ new_thread = (thread_t)dequeue_head(&rt_runq.queue);
+
+ new_thread->runq = PROCESSOR_NULL;
+ SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);
+ rt_runq.count--;
+
+ processor->deadline = new_thread->realtime.deadline;
+
+ rt_lock_unlock();
+ pset_unlock(pset);
+
+ return (new_thread);
+ }
+ }
+
+ processor->deadline = UINT64_MAX;
+ rt_lock_unlock();
+
+ /* No RT threads, so let's look at the regular threads. */
+ if ((new_thread = SCHED(choose_thread)(processor, MINPRI, reason)) != THREAD_NULL) {
+ pset_unlock(pset);
+ return (new_thread);
+ }
+
+#if __SMP__
+ if (SCHED(steal_thread_enabled)) {
+ /*
+ * No runnable threads, attempt to steal
+ * from other processors. Returns with pset lock dropped.
+ */
+
+ if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
+ return (new_thread);
+ }
+
+ /*
+ * If other threads have appeared, shortcut
+ * around again.
+ */
+ if (!SCHED(processor_queue_empty)(processor) || rt_runq.count > 0)
+ continue;
+
+ pset_lock(pset);
+ }
+#endif
+
+ idle:
+ /*
+ * Nothing is runnable, so set this processor idle if it
+ * was running.
+ */
+ if (processor->state == PROCESSOR_RUNNING) {
+ remqueue((queue_entry_t)processor);
+ processor->state = PROCESSOR_IDLE;
+
+ if (processor->processor_primary == processor) {
+ enqueue_head(&pset->idle_queue, (queue_entry_t)processor);
+ }
+ else {
+ enqueue_head(&pset->idle_secondary_queue, (queue_entry_t)processor);
+ }
+ }
+
+#if __SMP__
+ /* Invoked with pset locked, returns with pset unlocked */
+ sched_SMT_balance(processor, pset);
+#else
+ pset_unlock(pset);
+#endif
+
+#if CONFIG_SCHED_IDLE_IN_PLACE
+ /*
+ * Choose idle thread if fast idle is not possible.
+ */
+ if (processor->processor_primary != processor)
+ return (processor->idle_thread);
+
+ if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
+ return (processor->idle_thread);
+
+ /*
+ * Perform idling activities directly without a
+ * context switch. Return dispatched thread,
+ * else check again for a runnable thread.
+ */
+ new_thread = thread_select_idle(thread, processor);
+
+#else /* !CONFIG_SCHED_IDLE_IN_PLACE */
+
+ /*
+ * Do a full context switch to idle so that the current
+ * thread can start running on another processor without
+ * waiting for the fast-idled processor to wake up.
+ */
+ new_thread = processor->idle_thread;
+
+#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */
+
+ } while (new_thread == THREAD_NULL);
+
+ return (new_thread);
+}
+
+#if CONFIG_SCHED_IDLE_IN_PLACE
+/*
+ * thread_select_idle:
+ *
+ * Idle the processor using the current thread context.
+ *
+ * Called with thread locked, then dropped and relocked.
+ */
+static thread_t
+thread_select_idle(
+ thread_t thread,
+ processor_t processor)
+{
+ thread_t new_thread;
+ uint64_t arg1, arg2;
+ int urgency;
+
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ if (thread->sched_flags & TH_SFLAG_THROTTLED)
+ sched_background_decr(thread);
+
+ sched_share_decr(thread);
+ }
+ sched_run_decr(thread);
+
+ thread->state |= TH_IDLE;
+ processor->current_pri = IDLEPRI;
+ processor->current_thmode = TH_MODE_NONE;
+ processor->current_sfi_class = SFI_CLASS_KERNEL;
+
+ /* Reload precise timing global policy to thread-local policy */
+ thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
+
+ thread_unlock(thread);
+
+ /*
+ * Switch execution timing to processor idle thread.
+ */
+ processor->last_dispatch = mach_absolute_time();
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+ commpage_update_mach_approximate_time(processor->last_dispatch);
+#endif
+
+ thread->last_run_time = processor->last_dispatch;
+ thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
+ PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;
+
+ /*
+ * Cancel the quantum timer while idling.
+ */
+ timer_call_cancel(&processor->quantum_timer);
+ processor->first_timeslice = FALSE;
+
+ (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
+
+ thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, NULL);
+
+ /*
+ * Enable interrupts and perform idling activities. No
+ * preemption due to TH_IDLE being set.
+ */
+	spllo();
+	new_thread = processor_idle(thread, processor);
+
+ /*
+ * Return at splsched.
+ */
+ (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
+
+ thread_lock(thread);
+
+ /*
+ * If awakened, switch to thread timer and start a new quantum.
+ * Otherwise skip; we will context switch to another thread or return here.
+ */
+ if (!(thread->state & TH_WAIT)) {
+ processor->last_dispatch = mach_absolute_time();
+ thread_timer_event(processor->last_dispatch, &thread->system_timer);
+ PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
+
+ thread_quantum_init(thread);
+ processor->quantum_end = processor->last_dispatch + thread->quantum_remaining;
+ timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+ processor->first_timeslice = TRUE;
+
+ thread->computation_epoch = processor->last_dispatch;
+ }
+
+ thread->state &= ~TH_IDLE;
+
+ urgency = thread_get_urgency(thread, &arg1, &arg2);
+
+ thread_tell_urgency(urgency, arg1, arg2, 0, new_thread);
+
+ sched_run_incr(thread);
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ sched_share_incr(thread);
+
+ if (thread->sched_flags & TH_SFLAG_THROTTLED)
+ sched_background_incr(thread);
+ }
+
+ return (new_thread);
+}
+#endif /* CONFIG_SCHED_IDLE_IN_PLACE */
+
+/*
+ * thread_invoke
+ *
+ * Called at splsched with neither thread locked.
+ *
+ * Perform a context switch and start executing the new thread.
+ *
+ * Returns FALSE when the context switch didn't happen.
+ * The reference to the new thread is still consumed.
+ *
+ * "self" is what is currently running on the processor,
+ * "thread" is the new thread to context switch to
+ * (which may be the same thread in some cases)
+ */
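+/*
+ * Typical call flow, as in thread_block_reason() below:
+ *
+ *	do {
+ *		thread_lock(self);
+ *		new_thread = thread_select(self, processor, reason);
+ *		thread_unlock(self);
+ *	} while (!thread_invoke(self, new_thread, reason));
+ *
+ * so a FALSE return (e.g. when a kernel stack cannot be allocated for the
+ * target) simply triggers another selection pass.
+ */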
+static boolean_t
+thread_invoke(
+ thread_t self,
+ thread_t thread,
+ ast_t reason)
+{
+ if (__improbable(get_preemption_level() != 0)) {
+ int pl = get_preemption_level();
+ panic("thread_invoke: preemption_level %d, possible cause: %s",
+ pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
+ "blocking while holding a spinlock, or within interrupt context"));
+ }
+
+ thread_continue_t continuation = self->continuation;
+ void *parameter = self->parameter;
+ processor_t processor;
+
+ uint64_t ctime = mach_absolute_time();
+
+#ifdef CONFIG_MACH_APPROXIMATE_TIME
+ commpage_update_mach_approximate_time(ctime);
+#endif
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+ sched_timeshare_consider_maintenance(ctime);
+#endif
+
+ assert(self == current_thread());
+ assert(self->runq == PROCESSOR_NULL);
+ assert((self->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);
+
+ thread_lock(thread);
+
+ assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
+ assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
+ assert(thread->runq == PROCESSOR_NULL);
+
+ /* Reload precise timing global policy to thread-local policy */
+ thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
+
+ /* Update SFI class based on other factors */
+ thread->sfi_class = sfi_thread_classify(thread);
+
+ /* Allow realtime threads to hang onto a stack. */
+ if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack)
+ self->reserved_stack = self->kernel_stack;
+
+ if (continuation != NULL) {
+ if (!thread->kernel_stack) {
+ /*
+ * If we are using a privileged stack,
+ * check to see whether we can exchange it with
+ * that of the other thread.
+ */
+ if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
+ goto need_stack;
+
+ /*
+ * Context switch by performing a stack handoff.
+ */
+ continuation = thread->continuation;
+ parameter = thread->parameter;
+
+ processor = current_processor();
+ processor->active_thread = thread;
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ if (thread->last_processor != processor && thread->last_processor != NULL) {
+ if (thread->last_processor->processor_set != processor->processor_set)
+ thread->ps_switch++;
+ thread->p_switch++;
+ }
+ thread->last_processor = processor;
+ thread->c_switch++;
+ ast_context(thread);
+
+ thread_unlock(thread);
+
+ self->reason = reason;
+
+ processor->last_dispatch = ctime;
+ self->last_run_time = ctime;
+ thread_timer_event(ctime, &thread->system_timer);
+ PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
+
+ /*
+ * Since non-precise user/kernel time doesn't update the state timer
+ * during privilege transitions, synthesize an event now.
+ */
+ if (!thread->precise_user_kernel_time) {
+ timer_switch(PROCESSOR_DATA(processor, current_state),
+ ctime,
+ PROCESSOR_DATA(processor, current_state));
+ }
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
+ self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
+
+ if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
+ SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
+ }
+
+ DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
+
+ SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
+
+ TLOG(1, "thread_invoke: calling stack_handoff\n");
+ stack_handoff(self, thread);
+
+ /* 'self' is now off core */
+ assert(thread == current_thread());
+
+ DTRACE_SCHED(on__cpu);
+
+ thread_dispatch(self, thread);
+
+ thread->continuation = thread->parameter = NULL;
+
+ counter(c_thread_invoke_hits++);
+
+ (void) spllo();
+
+ assert(continuation);
+ call_continuation(continuation, parameter, thread->wait_result);
+ /*NOTREACHED*/
+ }
+ else if (thread == self) {
+ /* same thread but with continuation */
+ ast_context(self);
+ counter(++c_thread_invoke_same);
+
+ thread_unlock(self);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
+ self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
+
+ self->continuation = self->parameter = NULL;
+
+ (void) spllo();
+
+ call_continuation(continuation, parameter, self->wait_result);
+ /*NOTREACHED*/
+ }
+ } else {
+ /*
+		 * Check that the other thread has a stack.
+ */
+ if (!thread->kernel_stack) {
+need_stack:
+ if (!stack_alloc_try(thread)) {
+ counter(c_thread_invoke_misses++);
+ thread_unlock(thread);
+ thread_stack_enqueue(thread);
+ return (FALSE);
+ }
+ } else if (thread == self) {
+ ast_context(self);
+ counter(++c_thread_invoke_same);
+ thread_unlock(self);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
+ self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
+
+ return (TRUE);
+ }
+ }
+
+ /*
+ * Context switch by full context save.
+ */
+ processor = current_processor();
+ processor->active_thread = thread;
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ if (thread->last_processor != processor && thread->last_processor != NULL) {
+ if (thread->last_processor->processor_set != processor->processor_set)
+ thread->ps_switch++;
+ thread->p_switch++;
+ }
+ thread->last_processor = processor;
+ thread->c_switch++;
+ ast_context(thread);
+
+ thread_unlock(thread);
+
+ counter(c_thread_invoke_csw++);
+
+ self->reason = reason;
+
+ processor->last_dispatch = ctime;
+ self->last_run_time = ctime;
+ thread_timer_event(ctime, &thread->system_timer);
+ PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
+
+ /*
+ * Since non-precise user/kernel time doesn't update the state timer
+ * during privilege transitions, synthesize an event now.
+ */
+ if (!thread->precise_user_kernel_time) {
+ timer_switch(PROCESSOR_DATA(processor, current_state),
+ ctime,
+ PROCESSOR_DATA(processor, current_state));
+ }
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
+ self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
+
+ if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
+ SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
+ }
+
+ DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
+
+ SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
+
+ /*
+ * This is where we actually switch register context,
+ * and address space if required. We will next run
+ * as a result of a subsequent context switch.
+ *
+	 * Once registers are switched and the processor is running "thread",
+	 * the stack variables and non-volatile registers will contain whatever
+	 * was there the last time that thread blocked.  No local variables
+	 * should be used after this point, with two exceptions: "thread",
+	 * which the platform layer returns via the function call ABI as the
+	 * previous thread that was running on this processor, and "self",
+	 * which may have been stored on the stack or in a non-volatile
+	 * register; its value is accurate again because this thread is once
+	 * more running on the CPU.
+ */
+ assert(continuation == self->continuation);
+ thread = machine_switch_context(self, continuation, thread);
+ assert(self == current_thread());
+ TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
+
+ DTRACE_SCHED(on__cpu);
+
+ /*
+ * We have been resumed and are set to run.
+ */
+ thread_dispatch(thread, self);
+
+ if (continuation) {
+ self->continuation = self->parameter = NULL;
+
+ (void) spllo();
+
+ call_continuation(continuation, parameter, self->wait_result);
+ /*NOTREACHED*/
+ }
+
+ return (TRUE);
+}
+
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+/*
+ * pset_cancel_deferred_dispatch:
+ *
+ * Cancels all ASTs that we can cancel for the given processor set
+ * if the current processor is running the last runnable thread in the
+ * system.
+ *
+ * This function assumes the current thread is runnable. This must
+ * be called with the pset unlocked.
+ */
+static void
+pset_cancel_deferred_dispatch(
+ processor_set_t pset,
+ processor_t processor)
+{
+ processor_t active_processor = NULL;
+ uint32_t sampled_sched_run_count;
+
+ pset_lock(pset);
+ sampled_sched_run_count = (volatile uint32_t) sched_run_count;
+
+ /*
+ * If we have emptied the run queue, and our current thread is runnable, we
+ * should tell any processors that are still DISPATCHING that they will
+ * probably not have any work to do. In the event that there are no
+ * pending signals that we can cancel, this is also uninteresting.
+ *
+ * In the unlikely event that another thread becomes runnable while we are
+ * doing this (sched_run_count is atomically updated, not guarded), the
+ * codepath making it runnable SHOULD (a dangerous word) need the pset lock
+ * in order to dispatch it to a processor in our pset. So, the other
+ * codepath will wait while we squash all cancelable ASTs, get the pset
+ * lock, and then dispatch the freshly runnable thread. So this should be
+ * correct (we won't accidentally have a runnable thread that hasn't been
+ * dispatched to an idle processor), if not ideal (we may be restarting the
+ * dispatch process, which could have some overhead).
+	 */
+ if ((sampled_sched_run_count == 1) &&
+ (pset->pending_deferred_AST_cpu_mask)) {
+ qe_foreach_element_safe(active_processor, &pset->active_queue, processor_queue) {
+ /*
+ * If a processor is DISPATCHING, it could be because of
+ * a cancelable signal.
+ *
+ * IF the processor is not our
+ * current processor (the current processor should not
+ * be DISPATCHING, so this is a bit paranoid), AND there
+ * is a cancelable signal pending on the processor, AND
+ * there is no non-cancelable signal pending (as there is
+ * no point trying to backtrack on bringing the processor
+ * up if a signal we cannot cancel is outstanding), THEN
+ * it should make sense to roll back the processor state
+ * to the IDLE state.
+ *
+			 * If the racy nature of this approach (as the signal
+ * will be arbitrated by hardware, and can fire as we
+ * roll back state) results in the core responding
+ * despite being pushed back to the IDLE state, it
+ * should be no different than if the core took some
+ * interrupt while IDLE.
+ */
+ if ((active_processor->state == PROCESSOR_DISPATCHING) &&
+ (pset->pending_deferred_AST_cpu_mask & (1ULL << active_processor->cpu_id)) &&
+ (!(pset->pending_AST_cpu_mask & (1ULL << active_processor->cpu_id))) &&
+ (active_processor != processor)) {
+ /*
+ * Squash all of the processor state back to some
+ * reasonable facsimile of PROCESSOR_IDLE.
+ *
+ * TODO: What queue policy do we actually want here?
+ * We want to promote selection of a good processor
+ * to run on. Do we want to enqueue at the head?
+ * The tail? At the (relative) old position in the
+ * queue? Or something else entirely?
+ */
+ re_queue_head(&pset->idle_queue, (queue_entry_t)active_processor);
+
+ assert(active_processor->next_thread == THREAD_NULL);
+
+ active_processor->current_pri = IDLEPRI;
+ active_processor->current_thmode = TH_MODE_FIXED;
+ active_processor->current_sfi_class = SFI_CLASS_KERNEL;
+ active_processor->deadline = UINT64_MAX;
+ active_processor->state = PROCESSOR_IDLE;
+ pset->pending_deferred_AST_cpu_mask &= ~(1U << active_processor->cpu_id);
+ machine_signal_idle_cancel(active_processor);
+ }
+
+ }
+ }
+
+ pset_unlock(pset);
+}
+#else
+/* We don't support deferred ASTs; everything is candycanes and sunshine. */
+#endif
+
+/*
+ * thread_dispatch:
+ *
+ * Handle threads at context switch. Re-dispatch other thread
+ * if still running, otherwise update run state and perform
+ * special actions. Update quantum for other thread and begin
+ * the quantum for ourselves.
+ *
+ * "thread" is the old thread that we have switched away from.
+ * "self" is the new current thread that we have context switched to
+ *
+ * Called at splsched.
+ */
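+/*
+ * Quantum accounting sketch (illustrative numbers): suppose the old thread
+ * went on core with quantum_remaining = 10ms and blocks while
+ * quantum_end - last_dispatch = 4ms is still outstanding.  Then
+ * consumed = 10ms - 4ms = 6ms is billed to the task and thread ledgers,
+ * and (assuming processor->first_timeslice) the thread keeps
+ * quantum_remaining = 4ms for its next dispatch, unless that remainder is
+ * below min_std_quantum, in which case the quantum is treated as expired.
+ */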
+void
+thread_dispatch(
+ thread_t thread,
+ thread_t self)
+{
+ processor_t processor = self->last_processor;
+
+ assert(processor == current_processor());
+ assert(self == current_thread());
+ assert(thread != self);
+
+ if (thread != THREAD_NULL) {
+ /*
+ * If blocked at a continuation, discard
+ * the stack.
+ */
+ if (thread->continuation != NULL && thread->kernel_stack != 0)
+ stack_free(thread);
+
+ if (thread->state & TH_IDLE) {
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), 0, thread->state, sched_run_count, 0);
+ } else {
+ int64_t consumed;
+ int64_t remainder = 0;
+
+ if (processor->quantum_end > processor->last_dispatch)
+ remainder = processor->quantum_end -
+ processor->last_dispatch;
+
+ consumed = thread->quantum_remaining - remainder;
+
+ if ((thread->reason & AST_LEDGER) == 0) {
+ /*
+ * Bill CPU time to both the task and
+ * the individual thread.
+ */
+ ledger_credit(thread->t_ledger,
+ task_ledgers.cpu_time, consumed);
+ ledger_credit(thread->t_threadledger,
+ thread_ledgers.cpu_time, consumed);
+#ifdef CONFIG_BANK
+ if (thread->t_bankledger) {
+ ledger_credit(thread->t_bankledger,
+ bank_ledgers.cpu_time,
+ (consumed - thread->t_deduct_bank_ledger_time));
+				}
+				thread->t_deduct_bank_ledger_time = 0;
+#endif
+ }
+
+ wake_lock(thread);
+ thread_lock(thread);
+
+ /*
+ * Compute remainder of current quantum.
+ */
+ if (processor->first_timeslice &&
+ processor->quantum_end > processor->last_dispatch)
+ thread->quantum_remaining = (uint32_t)remainder;
+ else
+ thread->quantum_remaining = 0;
+
+ if (thread->sched_mode == TH_MODE_REALTIME) {
+ /*
+ * Cancel the deadline if the thread has
+ * consumed the entire quantum.
+ */
+ if (thread->quantum_remaining == 0) {
+ thread->realtime.deadline = UINT64_MAX;
+ }
+ } else {
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+ /*
+ * For non-realtime threads treat a tiny
+ * remaining quantum as an expired quantum
+ * but include what's left next time.
+ */
+ if (thread->quantum_remaining < min_std_quantum) {
+ thread->reason |= AST_QUANTUM;
+ thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
+ }
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+ }
+
+ /*
+ * If we are doing a direct handoff then
+ * take the remainder of the quantum.
+ */
+ if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
+ self->quantum_remaining = thread->quantum_remaining;
+ thread->reason |= AST_QUANTUM;
+ thread->quantum_remaining = 0;
+ } else {
+#if defined(CONFIG_SCHED_MULTIQ)
+ if (SCHED(sched_groups_enabled) &&
+ thread->sched_group == self->sched_group) {
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
+ self->reason, (uintptr_t)thread_tid(thread),
+ self->quantum_remaining, thread->quantum_remaining, 0);
+
+ self->quantum_remaining = thread->quantum_remaining;
+ thread->quantum_remaining = 0;
+ /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
+ }
+#endif /* defined(CONFIG_SCHED_MULTIQ) */
+ }
+
+ thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
+
+ if ((thread->rwlock_count != 0) && !(LcksOpts & disLkRWPrio)) {
+ integer_t priority;
+
+ priority = thread->sched_pri;
+
+ if (priority < thread->base_pri)
+ priority = thread->base_pri;
+ if (priority < BASEPRI_BACKGROUND)
+ priority = BASEPRI_BACKGROUND;
+
+ if ((thread->sched_pri < priority) || !(thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE) | DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), thread->sched_pri, thread->base_pri, priority, 0);
+
+ thread->sched_flags |= TH_SFLAG_RW_PROMOTED;
+
+ if (thread->sched_pri < priority)
+ set_sched_pri(thread, priority);
+ }
+ }
+
+ if (!(thread->state & TH_WAIT)) {
+ /*
+ * Still runnable.
+ */
+ thread->last_made_runnable_time = mach_approximate_time();
+
+ machine_thread_going_off_core(thread, FALSE);
+
+ if (thread->reason & AST_QUANTUM)
+ thread_setrun(thread, SCHED_TAILQ);
+ else if (thread->reason & AST_PREEMPT)
+ thread_setrun(thread, SCHED_HEADQ);
+ else
+ thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), thread->reason, thread->state, sched_run_count, 0);
+
+ if (thread->wake_active) {
+ thread->wake_active = FALSE;
+ thread_unlock(thread);
+
+ thread_wakeup(&thread->wake_active);
+ } else {
+ thread_unlock(thread);
+ }
+
+ wake_unlock(thread);
+ } else {
+ /*
+ * Waiting.
+ */
+ boolean_t should_terminate = FALSE;
+ uint32_t new_run_count;
+
+ /* Only the first call to thread_dispatch
+ * after explicit termination should add
+				 * the thread to the termination queue.
+ */
+ if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) {
+ should_terminate = TRUE;
+ thread->state |= TH_TERMINATE2;
+ }
+
+ thread->state &= ~TH_RUN;
+ thread->last_made_runnable_time = ~0ULL;
+ thread->chosen_processor = PROCESSOR_NULL;
+
+ if (thread->sched_mode == TH_MODE_TIMESHARE) {
+ if (thread->sched_flags & TH_SFLAG_THROTTLED)
+ sched_background_decr(thread);
+
+ sched_share_decr(thread);
+ }
+ new_run_count = sched_run_decr(thread);
+
+#if CONFIG_SCHED_SFI
+ if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
+ if (thread->reason & AST_SFI) {
+ thread->wait_sfi_begin_time = processor->last_dispatch;
+ }
+ }
+#endif
+
+ machine_thread_going_off_core(thread, should_terminate);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), thread->reason, thread->state, new_run_count, 0);
+
+ (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
+
+ if (thread->wake_active) {
+ thread->wake_active = FALSE;
+ thread_unlock(thread);
+
+ thread_wakeup(&thread->wake_active);
+ } else {
+ thread_unlock(thread);
+ }
+
+ wake_unlock(thread);
+
+ if (should_terminate)
+ thread_terminate_enqueue(thread);
+ }
+ }
+ }
+
+ /* Update (new) current thread and reprogram quantum timer */
+ thread_lock(self);
+ if (!(self->state & TH_IDLE)) {
+ uint64_t arg1, arg2;
+ int urgency;
+ uint64_t latency;
+
+#if CONFIG_SCHED_SFI
+ ast_t new_ast;
+
+ new_ast = sfi_thread_needs_ast(self, NULL);
+
+ if (new_ast != AST_NONE) {
+ ast_on(new_ast);
+ }
+#endif
+
+ assert(processor->last_dispatch >= self->last_made_runnable_time);
+ latency = processor->last_dispatch - self->last_made_runnable_time;
+
+ urgency = thread_get_urgency(self, &arg1, &arg2);
+
+ thread_tell_urgency(urgency, arg1, arg2, latency, self);
+
+ machine_thread_going_on_core(self, urgency, latency);
+
+ /*
+ * Get a new quantum if none remaining.
+ */
+ if (self->quantum_remaining == 0) {
+ thread_quantum_init(self);
+ }
+
+ /*
+ * Set up quantum timer and timeslice.
+ */
+ processor->quantum_end = processor->last_dispatch + self->quantum_remaining;
+ timer_call_enter1(&processor->quantum_timer, self, processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
+
+ processor->first_timeslice = TRUE;
+ } else {
+ timer_call_cancel(&processor->quantum_timer);
+ processor->first_timeslice = FALSE;
+
+ thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
+ machine_thread_going_on_core(self, THREAD_URGENCY_NONE, 0);
+ }
+
+ self->computation_epoch = processor->last_dispatch;
+ self->reason = AST_NONE;
+
+ thread_unlock(self);
+
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+ /*
+ * TODO: Can we state that redispatching our old thread is also
+ * uninteresting?
+ */
+ if ((((volatile uint32_t)sched_run_count) == 1) &&
+ !(self->state & TH_IDLE)) {
+ pset_cancel_deferred_dispatch(processor->processor_set, processor);
+ }
+#endif
+
+}
+
+/*
+ * thread_block_reason:
+ *
+ * Forces a reschedule, blocking the caller if a wait
+ * has been asserted.
+ *
+ * If a continuation is specified, then thread_invoke will
+ * attempt to discard the thread's kernel stack. When the
+ * thread resumes, it will execute the continuation function
+ * on a new kernel stack.
+ */
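+/*
+ * Usage sketch (hypothetical caller; names invented for illustration):
+ *
+ *	assert_wait(&some_event, THREAD_UNINT);
+ *	thread_block(my_continuation);
+ *
+ * If the wait is asserted, the kernel stack may be discarded, and the
+ * thread later resumes in my_continuation(parameter, wait_result) on a
+ * fresh stack; when the continuation fires, thread_block() does not
+ * return, and a continuation must never return to its caller.
+ */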
+counter(mach_counter_t c_thread_block_calls = 0;)
+
+wait_result_t
+thread_block_reason(
+ thread_continue_t continuation,
+ void *parameter,
+ ast_t reason)
+{
+ thread_t self = current_thread();
+ processor_t processor;
+ thread_t new_thread;
+ spl_t s;
+
+ counter(++c_thread_block_calls);
+
+ s = splsched();
+
+ processor = current_processor();
+
+ /* If we're explicitly yielding, force a subsequent quantum */
+ if (reason & AST_YIELD)
+ processor->first_timeslice = FALSE;
+
+	/* We're handling all scheduling ASTs */
+ ast_off(AST_SCHEDULING);
+
+ self->continuation = continuation;
+ self->parameter = parameter;
+
+ if (self->state & ~(TH_RUN | TH_IDLE)) {
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_BLOCK),
+ reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
+ }
+
+ do {
+ thread_lock(self);
+ new_thread = thread_select(self, processor, reason);
+ thread_unlock(self);
+ } while (!thread_invoke(self, new_thread, reason));
+
+ splx(s);
+
+ return (self->wait_result);
+}
+
+/*
+ * thread_block:
+ *
+ * Block the current thread if a wait has been asserted.
+ */
+wait_result_t
+thread_block(
+ thread_continue_t continuation)
+{
+ return thread_block_reason(continuation, NULL, AST_NONE);
+}
+
+wait_result_t
+thread_block_parameter(
+ thread_continue_t continuation,
+ void *parameter)
+{
+ return thread_block_reason(continuation, parameter, AST_NONE);
+}
+
+/*
+ * thread_run:
+ *
+ * Switch directly from the current thread to the
+ * new thread, handing off our quantum if appropriate.
+ *
+ * New thread must be runnable, and not on a run queue.
+ *
+ * Called at splsched.
+ */
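+/*
+ * Usage sketch (hypothetical caller): at splsched, with new_thread runnable
+ * and not enqueued, a wakeup path might hand off directly:
+ *
+ *	thread_run(self, THREAD_CONTINUE_NULL, NULL, new_thread);
+ *
+ * The first thread_invoke() attempt carries AST_HANDOFF, so on success the
+ * new thread inherits the remainder of our quantum (see thread_dispatch());
+ * on failure we fall back to an ordinary thread_select() loop.
+ */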
+int
+thread_run(
+ thread_t self,
+ thread_continue_t continuation,
+ void *parameter,
+ thread_t new_thread)
+{
+ ast_t handoff = AST_HANDOFF;
+
+ self->continuation = continuation;
+ self->parameter = parameter;
+
+ while (!thread_invoke(self, new_thread, handoff)) {
+ processor_t processor = current_processor();
+
+ thread_lock(self);
+ new_thread = thread_select(self, processor, AST_NONE);
+ thread_unlock(self);
+ handoff = AST_NONE;
+ }
+
+ return (self->wait_result);
+}
+
+/*
+ * thread_continue:
+ *
+ * Called at splsched when a thread first receives
+ * a new stack after a continuation.
+ */
+void
+thread_continue(
+ thread_t thread)
+{
+ thread_t self = current_thread();
+ thread_continue_t continuation;
+ void *parameter;
+
+ DTRACE_SCHED(on__cpu);
+
+ continuation = self->continuation;
+ parameter = self->parameter;
+
+ thread_dispatch(thread, self);
+
+ self->continuation = self->parameter = NULL;
+
+ if (thread != THREAD_NULL)
+ (void)spllo();
+
+	TLOG(1, "thread_continue: calling call_continuation\n");
+ call_continuation(continuation, parameter, self->wait_result);
+ /*NOTREACHED*/
+}
+
+void
+thread_quantum_init(thread_t thread)
+{
+ if (thread->sched_mode == TH_MODE_REALTIME) {
+ thread->quantum_remaining = thread->realtime.computation;
+ } else {
+ thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
+ }
+}
+
+uint32_t
+sched_timeshare_initial_quantum_size(thread_t thread)
+{
+ if ((thread == THREAD_NULL) || !(thread->sched_flags & TH_SFLAG_THROTTLED))
+ return std_quantum;
+ else
+ return bg_quantum;
+}
+
+/*
+ * run_queue_init:
+ *
+ * Initialize a run queue before first use.
+ */
+void
+run_queue_init(
+ run_queue_t rq)
+{
+ int i;
+
+ rq->highq = IDLEPRI;
+ for (i = 0; i < NRQBM; i++)
+ rq->bitmap[i] = 0;
+ setbit(MAXPRI - IDLEPRI, rq->bitmap);
+ rq->urgency = rq->count = 0;
+ for (i = 0; i < NRQS; i++)
+ queue_init(&rq->queues[i]);
+}
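+
+/*
+ * Bitmap convention, worked example: a thread at sched_pri 80 occupies bit
+ * (MAXPRI - 80) of rq->bitmap, so ffsbit() finds the highest occupied
+ * priority first; rq->highq caches that value.  IDLEPRI's bit is always
+ * set, so the search is never unbounded.
+ */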
+
+/*
+ * run_queue_dequeue:
+ *
+ * Perform a dequeue operation on a run queue,
+ * and return the resulting thread.
+ *
+ * The run queue must be locked (see thread_run_queue_remove()
+ * for more info), and not empty.
+ */
+thread_t
+run_queue_dequeue(
+ run_queue_t rq,
+ integer_t options)
+{
+ thread_t thread;
+ queue_t queue = rq->queues + rq->highq;
+
+ if (options & SCHED_HEADQ) {
+ thread = (thread_t)dequeue_head(queue);
+ }
+ else {
+ thread = (thread_t)dequeue_tail(queue);
+ }
+
+ thread->runq = PROCESSOR_NULL;
+ SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
+ rq->count--;
+ if (SCHED(priority_is_urgent)(rq->highq)) {
+ rq->urgency--; assert(rq->urgency >= 0);
+ }
+ if (queue_empty(queue)) {
+ if (rq->highq != IDLEPRI)
+ clrbit(MAXPRI - rq->highq, rq->bitmap);
+ rq->highq = MAXPRI - ffsbit(rq->bitmap);
+ }
return (thread);
}
+
+/*
+ * run_queue_enqueue:
+ *
+ * Perform an enqueue operation on a run queue.
+ *
+ * The run queue must be locked (see thread_run_queue_remove()
+ * for more info).
+ */
+boolean_t
+run_queue_enqueue(
+ run_queue_t rq,
+ thread_t thread,
+ integer_t options)
+{
+ queue_t queue = rq->queues + thread->sched_pri;
+ boolean_t result = FALSE;
+
+ if (queue_empty(queue)) {
+ enqueue_tail(queue, (queue_entry_t)thread);
+
+ setbit(MAXPRI - thread->sched_pri, rq->bitmap);
+ if (thread->sched_pri > rq->highq) {
+ rq->highq = thread->sched_pri;
+ result = TRUE;
+ }
+ } else {
+ if (options & SCHED_TAILQ)
+ enqueue_tail(queue, (queue_entry_t)thread);
+ else
+ enqueue_head(queue, (queue_entry_t)thread);
+ }
+ if (SCHED(priority_is_urgent)(thread->sched_pri))
+ rq->urgency++;
+ SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
+ rq->count++;
+
+	return (result);
+}
+
+/*
+ * run_queue_remove:
+ *
+ * Remove a specific thread from a runqueue.
+ *
+ * The run queue must be locked.
+ */
+void
+run_queue_remove(
+ run_queue_t rq,
+ thread_t thread)
+{
+	remqueue((queue_entry_t)thread);
+ SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
+ rq->count--;
+ if (SCHED(priority_is_urgent)(thread->sched_pri)) {
+ rq->urgency--; assert(rq->urgency >= 0);
+ }
+
+ if (queue_empty(rq->queues + thread->sched_pri)) {
+ /* update run queue status */
+ if (thread->sched_pri != IDLEPRI)
+ clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
+ rq->highq = MAXPRI - ffsbit(rq->bitmap);
+ }
+
+ thread->runq = PROCESSOR_NULL;
+}
+
+/* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
+void
+rt_runq_scan(sched_update_scan_context_t scan_context)
+{
+ spl_t s;
+ thread_t thread;
+
+ s = splsched();
+ rt_lock_lock();
+
+ qe_foreach_element_safe(thread, &rt_runq.queue, links) {
+ if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
+ scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
+ }
+ }
+
+ rt_lock_unlock();
+ splx(s);
+}
+
/*
- * Stop running the current thread and start running the new thread.
- * If continuation is non-zero, and the current thread is blocked,
- * then it will resume by executing continuation on a new stack.
- * Returns TRUE if the hand-off succeeds.
- * The reason parameter == AST_QUANTUM if the thread blocked
- * because its quantum expired.
- * Assumes splsched.
+ * realtime_queue_insert:
+ *
+ * Enqueue a thread for realtime execution.
*/
+static boolean_t
+realtime_queue_insert(
+ thread_t thread)
+{
+ queue_t queue = &rt_runq.queue;
+ uint64_t deadline = thread->realtime.deadline;
+ boolean_t preempt = FALSE;
+
+	rt_lock_lock();
-static thread_t
-__current_thread(void)
+ if (queue_empty(queue)) {
+ enqueue_tail(queue, (queue_entry_t)thread);
+ preempt = TRUE;
+	} else {
+ register thread_t entry = (thread_t)queue_first(queue);
+
+ while (TRUE) {
+ if ( queue_end(queue, (queue_entry_t)entry) ||
+ deadline < entry->realtime.deadline ) {
+ entry = (thread_t)queue_prev((queue_entry_t)entry);
+ break;
+ }
+
+ entry = (thread_t)queue_next((queue_entry_t)entry);
+ }
+
+ if ((queue_entry_t)entry == queue)
+ preempt = TRUE;
+
+ insque((queue_entry_t)thread, (queue_entry_t)entry);
+ }
+
+ thread->runq = THREAD_ON_RT_RUNQ;
+ SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);
+ rt_runq.count++;
+
+ rt_lock_unlock();
+
+ return (preempt);
+}
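+
+/*
+ * Deadline-ordering example (illustrative): with queued deadlines
+ * [5, 9, 12], inserting a thread with deadline 7 yields [5, 7, 9, 12] and
+ * returns preempt == FALSE; inserting deadline 3 yields [3, 5, 9, 12] and
+ * returns preempt == TRUE, since the new thread became the head (earliest
+ * deadline) and may need to preempt the running processor.
+ */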
+
+/*
+ * realtime_setrun:
+ *
+ * Dispatch a thread for realtime execution.
+ *
+ * Thread must be locked. Associated pset must
+ * be locked, and is returned unlocked.
+ */
+static void
+realtime_setrun(
+ processor_t processor,
+ thread_t thread)
{
- return (current_thread());
+ processor_set_t pset = processor->processor_set;
+ ast_t preempt;
+
+ boolean_t do_signal_idle = FALSE, do_cause_ast = FALSE;
+
+ thread->chosen_processor = processor;
+
+ /* <rdar://problem/15102234> */
+ assert(thread->bound_processor == PROCESSOR_NULL);
+
+ /*
+ * Dispatch directly onto idle processor.
+ */
+ if ( (thread->bound_processor == processor)
+ && processor->state == PROCESSOR_IDLE) {
+ remqueue((queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+
+ processor->next_thread = thread;
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ processor->deadline = thread->realtime.deadline;
+ processor->state = PROCESSOR_DISPATCHING;
+
+ if (processor != current_processor()) {
+ if (!(pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))) {
+ /* cleared on exit from main processor_idle() loop */
+ pset->pending_AST_cpu_mask |= (1ULL << processor->cpu_id);
+ do_signal_idle = TRUE;
+ }
+ }
+ pset_unlock(pset);
+
+ if (do_signal_idle) {
+ machine_signal_idle(processor);
+ }
+ return;
+ }
+
+ if (processor->current_pri < BASEPRI_RTQUEUES)
+ preempt = (AST_PREEMPT | AST_URGENT);
+ else if (thread->realtime.deadline < processor->deadline)
+ preempt = (AST_PREEMPT | AST_URGENT);
+ else
+ preempt = AST_NONE;
+
+ realtime_queue_insert(thread);
+
+ if (preempt != AST_NONE) {
+ if (processor->state == PROCESSOR_IDLE) {
+ remqueue((queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+ processor->next_thread = THREAD_NULL;
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ processor->deadline = thread->realtime.deadline;
+ processor->state = PROCESSOR_DISPATCHING;
+ if (processor == current_processor()) {
+ ast_on(preempt);
+ } else {
+ if (!(pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))) {
+ /* cleared on exit from main processor_idle() loop */
+ pset->pending_AST_cpu_mask |= (1ULL << processor->cpu_id);
+ do_signal_idle = TRUE;
+ }
+ }
+ } else if (processor->state == PROCESSOR_DISPATCHING) {
+ if ((processor->next_thread == THREAD_NULL) && ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline))) {
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ processor->deadline = thread->realtime.deadline;
+ }
+ } else {
+ if (processor == current_processor()) {
+ ast_on(preempt);
+ } else {
+ if (!(pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))) {
+ /* cleared after IPI causes csw_check() to be called */
+ pset->pending_AST_cpu_mask |= (1ULL << processor->cpu_id);
+ do_cause_ast = TRUE;
+ }
+ }
+ }
+ } else {
+ /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
+ }
+
+ pset_unlock(pset);
+
+ if (do_signal_idle) {
+ machine_signal_idle(processor);
+ } else if (do_cause_ast) {
+ cause_ast_check(processor);
+ }
}
+
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+
boolean_t
-thread_invoke(
- register thread_t old_thread,
- register thread_t new_thread,
- int reason,
- void (*continuation)(void))
+priority_is_urgent(int priority)
+{
+ return testbit(priority, sched_preempt_pri) ? TRUE : FALSE;
+}
+
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+/*
+ * processor_setrun:
+ *
+ * Dispatch a thread for execution on a
+ * processor.
+ *
+ * Thread must be locked. Associated pset must
+ * be locked, and is returned unlocked.
+ */
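+/*
+ * Signalling summary for the body below: an idle processor is claimed by
+ * setting PROCESSOR_DISPATCHING and then either woken immediately
+ * (machine_signal_idle) or, with CONFIG_SCHED_DEFERRED_AST, woken lazily
+ * (machine_signal_idle_deferred) so the wakeup can be cancelled if the
+ * thread is drained first; a running processor is instead sent
+ * cause_ast_check() so it re-evaluates preemption via csw_check().
+ */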
+static void
+processor_setrun(
+ processor_t processor,
+ thread_t thread,
+ integer_t options)
+{
+ processor_set_t pset = processor->processor_set;
+ ast_t preempt;
+ enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
+ enum { eNoSignal, eDoSignal, eDoDeferredSignal } do_signal_idle = eNoSignal;
+
+ boolean_t do_cause_ast = FALSE;
+
+ thread->chosen_processor = processor;
+
+ /*
+ * Dispatch directly onto idle processor.
+ */
+ if ( (SCHED(direct_dispatch_to_idle_processors) ||
+ thread->bound_processor == processor)
+ && processor->state == PROCESSOR_IDLE) {
+ remqueue((queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+
+ processor->next_thread = thread;
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ processor->deadline = UINT64_MAX;
+ processor->state = PROCESSOR_DISPATCHING;
+
+ if (!(pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))) {
+ /* cleared on exit from main processor_idle() loop */
+ pset->pending_AST_cpu_mask |= (1ULL << processor->cpu_id);
+ do_signal_idle = eDoSignal;
+ }
+
+ pset_unlock(pset);
+
+ if (do_signal_idle == eDoSignal) {
+ machine_signal_idle(processor);
+ }
+
+ return;
+ }
+
+ /*
+ * Set preemption mode.
+ */
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+ /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
+#endif
+	if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
+		preempt = (AST_PREEMPT | AST_URGENT);
+	else if (processor->active_thread && thread_eager_preemption(processor->active_thread))
+		preempt = (AST_PREEMPT | AST_URGENT);
+	else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
+		if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
+			preempt = (options & SCHED_PREEMPT) ? AST_PREEMPT : AST_NONE;
+		} else {
+			preempt = AST_NONE;
+		}
+	} else
+		preempt = (options & SCHED_PREEMPT) ? AST_PREEMPT : AST_NONE;
+
+ SCHED(processor_enqueue)(processor, thread, options);
+
+ if (preempt != AST_NONE) {
+ if (processor->state == PROCESSOR_IDLE) {
+ remqueue((queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+ processor->next_thread = THREAD_NULL;
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ processor->deadline = UINT64_MAX;
+ processor->state = PROCESSOR_DISPATCHING;
+
+ ipi_action = eExitIdle;
+ } else if ( processor->state == PROCESSOR_DISPATCHING) {
+ if ((processor->next_thread == THREAD_NULL) && (processor->current_pri < thread->sched_pri)) {
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ processor->deadline = UINT64_MAX;
+ }
+ } else if ( (processor->state == PROCESSOR_RUNNING ||
+ processor->state == PROCESSOR_SHUTDOWN) &&
+ (thread->sched_pri >= processor->current_pri)) {
+ ipi_action = eInterruptRunning;
+ }
+ } else {
+ /*
+ * New thread is not important enough to preempt what is running, but
+ * special processor states may need special handling
+ */
+ if (processor->state == PROCESSOR_SHUTDOWN &&
+ thread->sched_pri >= processor->current_pri ) {
+ ipi_action = eInterruptRunning;
+ } else if ( processor->state == PROCESSOR_IDLE &&
+ processor != current_processor() ) {
+ remqueue((queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+ processor->next_thread = THREAD_NULL;
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class;
+ processor->deadline = UINT64_MAX;
+ processor->state = PROCESSOR_DISPATCHING;
+
+ ipi_action = eExitIdle;
+ }
+ }
+
+ switch (ipi_action) {
+ case eDoNothing:
+ break;
+ case eExitIdle:
+ if (processor == current_processor()) {
+ if (csw_check_locked(processor, pset, AST_NONE) != AST_NONE)
+ ast_on(preempt);
+ } else {
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+ if (!(pset->pending_deferred_AST_cpu_mask & (1ULL << processor->cpu_id)) &&
+ !(pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))) {
+ /* cleared on exit from main processor_idle() loop */
+ pset->pending_deferred_AST_cpu_mask |= (1ULL << processor->cpu_id);
+ do_signal_idle = eDoDeferredSignal;
+ }
+#else
+ if (!(pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))) {
+ /* cleared on exit from main processor_idle() loop */
+ pset->pending_AST_cpu_mask |= (1ULL << processor->cpu_id);
+ do_signal_idle = eDoSignal;
+ }
+#endif
+ }
+ break;
+ case eInterruptRunning:
+ if (processor == current_processor()) {
+ if (csw_check_locked(processor, pset, AST_NONE) != AST_NONE)
+ ast_on(preempt);
+ } else {
+ if (!(pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))) {
+ /* cleared after IPI causes csw_check() to be called */
+ pset->pending_AST_cpu_mask |= (1ULL << processor->cpu_id);
+ do_cause_ast = TRUE;
+ }
+ }
+ break;
+ }
+
+ pset_unlock(pset);
+
+ if (do_signal_idle == eDoSignal) {
+ machine_signal_idle(processor);
+ }
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+ else if (do_signal_idle == eDoDeferredSignal) {
+ /*
+ * TODO: The ability to cancel this signal could make
+ * sending it outside of the pset lock an issue. Do
+ * we need to address this? Or would the only fallout
+ * be that the core takes a signal? As long as we do
+ * not run the risk of having a core marked as signal
+ * outstanding, with no real signal outstanding, the
+ * only result should be that we fail to cancel some
+ * signals.
+ */
+ machine_signal_idle_deferred(processor);
+ }
+#endif
+ else if (do_cause_ast) {
+ cause_ast_check(processor);
+ }
+}
+
+/*
+ * choose_next_pset:
+ *
+ * Return the next sibling pset containing
+ * available processors.
+ *
+ * Returns the original pset if none other is
+ * suitable.
+ */
+static processor_set_t
+choose_next_pset(
+ processor_set_t pset)
{
- sched_policy_t *policy;
- sf_return_t sfr;
- void (*lcont)(void);
+ processor_set_t nset = pset;
+
+ do {
+ nset = next_pset(nset);
+ } while (nset->online_processor_count < 1 && nset != pset);
+
+ return (nset);
+}
+
+/*
+ * choose_processor:
+ *
+ * Choose a processor for the thread, beginning at
+ * the pset. Accepts an optional processor hint in
+ * the pset.
+ *
+ * Returns a processor, possibly from a different pset.
+ *
+ * The thread must be locked. The pset must be locked,
+ * and the resulting pset is locked on return.
+ */
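+/*
+ * Candidate ranking, summarized from the search below: (1) an idle primary
+ * in the current pset, (2) the lowest-priority "unpaired" primary (a busy
+ * primary whose secondary is idle), (3) the lowest-priority running
+ * processor, (4) for realtime threads, the processor with the furthest
+ * deadline; then move on to the next pset.  As a last resort, fall back to
+ * an idle secondary, the least-loaded processor, or the master processor.
+ */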
+processor_t
+choose_processor(
+ processor_set_t pset,
+ processor_t processor,
+ thread_t thread)
+{
+ processor_set_t nset, cset = pset;
+
/*
- * Mark thread interruptible.
+ * Prefer the hinted processor, when appropriate.
*/
- thread_lock(new_thread);
- new_thread->state &= ~TH_UNINT;
- if (cpu_data[cpu_number()].preemption_level != 1)
- panic("thread_invoke: preemption_level %d\n",
- cpu_data[cpu_number()].preemption_level);
+ /* Fold last processor hint from secondary processor to its primary */
+ if (processor != PROCESSOR_NULL) {
+ processor = processor->processor_primary;
+ }
+
+ /*
+ * Only consult platform layer if pset is active, which
+ * it may not be in some cases when a multi-set system
+ * is going to sleep.
+ */
+ if (pset->online_processor_count) {
+ if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
+ processor_t mc_processor = machine_choose_processor(pset, processor);
+ if (mc_processor != PROCESSOR_NULL)
+ processor = mc_processor->processor_primary;
+ }
+ }
+ /*
+ * At this point, we may have a processor hint, and we may have
+ * an initial starting pset. If the hint is not in the pset, or
+ * if the hint is for a processor in an invalid state, discard
+ * the hint.
+ */
+ if (processor != PROCESSOR_NULL) {
+ if (processor->processor_set != pset) {
+ processor = PROCESSOR_NULL;
+ } else if (!processor->is_recommended) {
+ processor = PROCESSOR_NULL;
+ } else {
+ switch (processor->state) {
+ case PROCESSOR_START:
+ case PROCESSOR_SHUTDOWN:
+ case PROCESSOR_OFF_LINE:
+ /*
+ * Hint is for a processor that cannot support running new threads.
+ */
+ processor = PROCESSOR_NULL;
+ break;
+ case PROCESSOR_IDLE:
+ /*
+ * Hint is for an idle processor. Assume it is no worse than any other
+ * idle processor. The platform layer had an opportunity to provide
+ * the "least cost idle" processor above.
+ */
+ return (processor);
+ break;
+ case PROCESSOR_RUNNING:
+ case PROCESSOR_DISPATCHING:
+ /*
+ * Hint is for an active CPU. This fast-path allows
+ * realtime threads to preempt non-realtime threads
+ * to regain their previous executing processor.
+ */
+ if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
+ (processor->current_pri < BASEPRI_RTQUEUES))
+ return (processor);
+
+ /* Otherwise, use hint as part of search below */
+ break;
+ default:
+ processor = PROCESSOR_NULL;
+ break;
+ }
+ }
+ }
- assert(thread_runnable(new_thread));
+ /*
+ * Iterate through the processor sets to locate
+ * an appropriate processor. Seed results with
+ * a last-processor hint, if available, so that
+ * a search must find something strictly better
+ * to replace it.
+ *
+ * A primary/secondary pair of SMT processors are
+ * "unpaired" if the primary is busy but its
+ * corresponding secondary is idle (so the physical
+ * core has full use of its resources).
+ */
- assert(old_thread->continuation == (void (*)(void))0);
+ integer_t lowest_priority = MAXPRI + 1;
+ integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
+ integer_t lowest_count = INT_MAX;
+ uint64_t furthest_deadline = 1;
+ processor_t lp_processor = PROCESSOR_NULL;
+ processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
+ processor_t lp_unpaired_secondary_processor = PROCESSOR_NULL;
+ processor_t lc_processor = PROCESSOR_NULL;
+ processor_t fd_processor = PROCESSOR_NULL;
+
+ if (processor != PROCESSOR_NULL) {
+ /* All other states should be enumerated above. */
+ assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
+
+ lowest_priority = processor->current_pri;
+ lp_processor = processor;
+
+ if (processor->current_pri >= BASEPRI_RTQUEUES) {
+ furthest_deadline = processor->deadline;
+ fd_processor = processor;
+ }
- if ((old_thread->sched_mode & TH_MODE_REALTIME) && (!old_thread->stack_privilege)) {
- old_thread->stack_privilege = old_thread->kernel_stack;
+ lowest_count = SCHED(processor_runq_count)(processor);
+ lc_processor = processor;
}
- if (continuation != (void (*)()) 0) {
- switch (new_thread->state & TH_STACK_STATE) {
- case TH_STACK_HANDOFF:
+ do {
+
+ /*
+ * Choose an idle processor, in pset traversal order
+ */
+ qe_foreach_element(processor, &cset->idle_queue, processor_queue) {
+ if (processor->is_recommended)
+ return processor;
+ }
+
+ /*
+ * Otherwise, enumerate active and idle processors to find candidates
+ * with lower priority/etc.
+ */
+
+ qe_foreach_element(processor, &cset->active_queue, processor_queue) {
+
+ if (!processor->is_recommended) {
+ continue;
+ }
+
+ integer_t cpri = processor->current_pri;
+ if (cpri < lowest_priority) {
+ lowest_priority = cpri;
+ lp_processor = processor;
+ }
+
+ if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
+ furthest_deadline = processor->deadline;
+ fd_processor = processor;
+ }
+
+ integer_t ccount = SCHED(processor_runq_count)(processor);
+ if (ccount < lowest_count) {
+ lowest_count = ccount;
+ lc_processor = processor;
+ }
+ }
+
+ /*
+		 * For SMT configs, these idle secondary processors must have an active
+		 * primary; otherwise the idle primary would have short-circuited the loop above.
+ */
+ qe_foreach_element(processor, &cset->idle_secondary_queue, processor_queue) {
+
+ if (!processor->is_recommended) {
+ continue;
+ }
+
+ processor_t cprimary = processor->processor_primary;
+
+ /* If the primary processor is offline or starting up, it's not a candidate for this path */
+ if (cprimary->state == PROCESSOR_RUNNING || cprimary->state == PROCESSOR_DISPATCHING) {
+ integer_t primary_pri = cprimary->current_pri;
+
+ if (primary_pri < lowest_unpaired_primary_priority) {
+ lowest_unpaired_primary_priority = primary_pri;
+ lp_unpaired_primary_processor = cprimary;
+ lp_unpaired_secondary_processor = processor;
+ }
+ }
+ }
+
+
+ if (thread->sched_pri >= BASEPRI_RTQUEUES) {
+
+ /*
+ * For realtime threads, the most important aspect is
+ * scheduling latency, so we attempt to assign threads
+ * to good preemption candidates (assuming an idle primary
+ * processor was not available above).
+ */
+
+ if (thread->sched_pri > lowest_unpaired_primary_priority) {
+ /* Move to end of active queue so that the next thread doesn't also pick it */
+ re_queue_tail(&cset->active_queue, (queue_entry_t)lp_unpaired_primary_processor);
+ return lp_unpaired_primary_processor;
+ }
+ if (thread->sched_pri > lowest_priority) {
+ /* Move to end of active queue so that the next thread doesn't also pick it */
+ re_queue_tail(&cset->active_queue, (queue_entry_t)lp_processor);
+ return lp_processor;
+ }
+ if (thread->realtime.deadline < furthest_deadline)
+ return fd_processor;
+
+ /*
+ * If all primary and secondary CPUs are busy with realtime
+ * threads with deadlines earlier than us, move on to next
+ * pset.
+ */
+ }
+ else {
+
+ if (thread->sched_pri > lowest_unpaired_primary_priority) {
+ /* Move to end of active queue so that the next thread doesn't also pick it */
+ re_queue_tail(&cset->active_queue, (queue_entry_t)lp_unpaired_primary_processor);
+ return lp_unpaired_primary_processor;
+ }
+ if (thread->sched_pri > lowest_priority) {
+ /* Move to end of active queue so that the next thread doesn't also pick it */
+ re_queue_tail(&cset->active_queue, (queue_entry_t)lp_processor);
+ return lp_processor;
+ }
+
+ /*
+ * If all primary processors in this pset are running higher
+ * priority threads, move on to the next pset. Only when we have
+ * exhausted this search do we fall back to other heuristics.
+ */
+ }
+
+ /*
+ * Move onto the next processor set.
+ */
+ nset = next_pset(cset);
+
+ if (nset != pset) {
+ pset_unlock(cset);
+
+ cset = nset;
+ pset_lock(cset);
+ }
+ } while (nset != pset);
+
+ /*
+ * Make sure that we pick a running processor,
+ * and that the correct processor set is locked.
+ * Since we may have unlocked the candidate processor's
+ * pset, it may have changed state.
+ *
+ * All primary processors are running a higher priority
+ * thread, so the only options left are enqueuing on
+ * the secondary processor that would perturb the lowest-priority
+ * primary, or the least busy primary.
+ */
+ do {
+
+ /* lowest_priority is evaluated in the main loops above */
+ if (lp_unpaired_secondary_processor != PROCESSOR_NULL) {
+ processor = lp_unpaired_secondary_processor;
+ lp_unpaired_secondary_processor = PROCESSOR_NULL;
+ } else if (lc_processor != PROCESSOR_NULL) {
+ processor = lc_processor;
+ lc_processor = PROCESSOR_NULL;
+ } else {
+ /*
+ * All processors are executing higher
+ * priority threads, and the lowest_count
+ * candidate was not usable
+ */
+ processor = master_processor;
+ }
+
+ /*
+ * Check that the correct processor set is
+ * returned locked.
+ */
+ if (cset != processor->processor_set) {
+ pset_unlock(cset);
+ cset = processor->processor_set;
+ pset_lock(cset);
+ }
/*
- * If the old thread has stack privilege, we can't give
- * his stack away. So go and get him one and treat this
- * as a traditional context switch.
+ * We must verify that the chosen processor is still available.
+ * master_processor is an exception, since we may need to preempt
+ * a running thread on it during processor shutdown (for sleep),
+ * and that thread needs to be enqueued on its runqueue to run
+ * when the processor is restarted.
*/
- if (old_thread->stack_privilege == current_stack())
- goto get_new_stack;
+ if (processor != master_processor && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE))
+ processor = PROCESSOR_NULL;
+
+ } while (processor == PROCESSOR_NULL);
+
+ return (processor);
+}
+
+/*
+ * thread_setrun:
+ *
+ * Dispatch thread for execution, onto an idle
+ * processor or run queue, and signal a preemption
+ * as appropriate.
+ *
+ * Thread must be locked.
+ */
+void
+thread_setrun(
+ thread_t thread,
+ integer_t options)
+{
+ processor_t processor;
+ processor_set_t pset;
+
+ assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
+ assert(thread->runq == PROCESSOR_NULL);
+ /*
+ * Update priority if needed.
+ */
+ if (SCHED(can_update_priority)(thread))
+ SCHED(update_priority)(thread);
+
+ thread->sfi_class = sfi_thread_classify(thread);
+
+ assert(thread->runq == PROCESSOR_NULL);
+
+#if __SMP__
+ if (thread->bound_processor == PROCESSOR_NULL) {
/*
- * Make the whole handoff/dispatch atomic to match the
- * non-handoff case.
+ * Unbound case.
*/
- disable_preemption();
+ if (thread->affinity_set != AFFINITY_SET_NULL) {
+ /*
+ * Use affinity set policy hint.
+ */
+ pset = thread->affinity_set->aset_pset;
+ pset_lock(pset);
- /*
- * Set up ast context of new thread and switch to its timer.
- */
- new_thread->state &= ~(TH_STACK_HANDOFF|TH_UNINT);
- new_thread->last_processor = current_processor();
- ast_context(new_thread->top_act, cpu_number());
- timer_switch(&new_thread->system_timer);
- thread_unlock(new_thread);
+ processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
- old_thread->continuation = continuation;
- stack_handoff(old_thread, new_thread);
+ SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
+ } else if (thread->last_processor != PROCESSOR_NULL) {
+ /*
+ * Simple (last processor) affinity case.
+ */
+ processor = thread->last_processor;
+ pset = processor->processor_set;
+ pset_lock(pset);
+ processor = SCHED(choose_processor)(pset, processor, thread);
- wake_lock(old_thread);
- thread_lock(old_thread);
- act_machine_sv_free(old_thread->top_act);
+ SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
+ } else {
+ /*
+ * No affinity case:
+ *
+ * Use a per-task hint to spread threads
+ * among the available processor sets.
+ */
+ task_t task = thread->task;
- /*
- * inline thread_dispatch but don't free stack
- */
+ pset = task->pset_hint;
+ if (pset == PROCESSOR_SET_NULL)
+ pset = current_processor()->processor_set;
- switch (old_thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {
- sched_policy_t *policy;
- sf_return_t sfr;
-
- case TH_RUN | TH_UNINT:
- case TH_RUN:
- /*
- * No reason to stop. Put back on a run queue.
- */
- old_thread->state |= TH_STACK_HANDOFF;
-
- /* Get pointer to scheduling policy "object" */
- policy = &sched_policy[old_thread->policy];
-
- /* Leave enqueueing thread up to scheduling policy */
- sfr = policy->sp_ops.sp_thread_dispatch(policy, old_thread);
- assert(sfr == SF_SUCCESS);
- break;
-
- case TH_RUN | TH_WAIT | TH_UNINT:
- case TH_RUN | TH_WAIT:
- old_thread->sleep_stamp = sched_tick;
- /* fallthrough */
-
- case TH_WAIT: /* this happens! */
- /*
- * Waiting
- */
- old_thread->state |= TH_STACK_HANDOFF;
- old_thread->state &= ~TH_RUN;
- if (old_thread->state & TH_TERMINATE)
- thread_reaper_enqueue(old_thread);
-
- if (old_thread->wake_active) {
- old_thread->wake_active = FALSE;
- thread_unlock(old_thread);
- wake_unlock(old_thread);
- thread_wakeup((event_t)&old_thread->wake_active);
- wake_lock(old_thread);
- thread_lock(old_thread);
- }
- break;
-
- case TH_RUN | TH_IDLE:
- /*
- * Drop idle thread -- it is already in
- * idle_thread_array.
- */
- old_thread->state |= TH_STACK_HANDOFF;
- break;
-
- default:
- panic("State 0x%x \n",old_thread->state);
- }
-
- /* Get pointer to scheduling policy "object" */
- policy = &sched_policy[old_thread->policy];
-
- /* Indicate to sched policy that old thread has stopped execution */
- /*** ??? maybe use a macro -- rkc, 1/4/96 ***/
- sfr = policy->sp_ops.sp_thread_done(policy, old_thread);
- assert(sfr == SF_SUCCESS);
- thread_unlock(old_thread);
- wake_unlock(old_thread);
- thread_lock(new_thread);
-
- assert(thread_runnable(new_thread));
-
- /* Get pointer to scheduling policy "object" */
- policy = &sched_policy[new_thread->policy];
-
- /* Indicate to sched policy that new thread has started execution */
- /*** ??? maybe use a macro ***/
- sfr = policy->sp_ops.sp_thread_begin(policy, new_thread);
- assert(sfr == SF_SUCCESS);
-
- lcont = new_thread->continuation;
- new_thread->continuation = (void(*)(void))0;
-
- thread_unlock(new_thread);
- enable_preemption();
-
- counter_always(c_thread_invoke_hits++);
-
- if (new_thread->funnel_state & TH_FN_REFUNNEL) {
- kern_return_t save_wait_result;
- new_thread->funnel_state = 0;
- save_wait_result = new_thread->wait_result;
- KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0);
- //mutex_lock(new_thread->funnel_lock);
- funnel_lock(new_thread->funnel_lock);
- KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0);
- new_thread->funnel_state = TH_FN_OWNED;
- new_thread->wait_result = save_wait_result;
- }
- (void) spllo();
+ pset = choose_next_pset(pset);
+ pset_lock(pset);
- assert(lcont);
- call_continuation(lcont);
- /*NOTREACHED*/
- return TRUE;
+ processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
+ task->pset_hint = processor->processor_set;
- case TH_STACK_COMING_IN:
+ SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
+ }
+ } else {
/*
- * waiting for a stack
+ * Bound case:
+ *
+ * Unconditionally dispatch on the processor.
*/
- thread_swapin(new_thread);
- thread_unlock(new_thread);
- counter_always(c_thread_invoke_misses++);
- return FALSE;
+ processor = thread->bound_processor;
+ pset = processor->processor_set;
+ pset_lock(pset);
- case 0:
- /*
- * already has a stack - can't handoff
- */
- if (new_thread == old_thread) {
-
- /* same thread but with continuation */
- counter(++c_thread_invoke_same);
- thread_unlock(new_thread);
-
- if (old_thread->funnel_state & TH_FN_REFUNNEL) {
- kern_return_t save_wait_result;
-
- old_thread->funnel_state = 0;
- save_wait_result = old_thread->wait_result;
- KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
- funnel_lock(old_thread->funnel_lock);
- KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
- old_thread->funnel_state = TH_FN_OWNED;
- old_thread->wait_result = save_wait_result;
- }
- (void) spllo();
- call_continuation(continuation);
- /*NOTREACHED*/
- }
- break;
- }
- } else {
- /*
- * check that the new thread has a stack
- */
- if (new_thread->state & TH_STACK_STATE) {
- get_new_stack:
- /* has no stack. if not already waiting for one try to get one */
- if ((new_thread->state & TH_STACK_COMING_IN) ||
- /* not already waiting. nonblocking try to get one */
- !stack_alloc_try(new_thread, thread_continue))
- {
- /* couldn't get one. schedule new thread to get a stack and
- return failure so we can try another thread. */
- thread_swapin(new_thread);
- thread_unlock(new_thread);
- counter_always(c_thread_invoke_misses++);
- return FALSE;
- }
- } else if (old_thread == new_thread) {
- counter(++c_thread_invoke_same);
- thread_unlock(new_thread);
- return TRUE;
- }
-
- /* new thread now has a stack. it has been setup to resume in
- thread_continue so it can dispatch the old thread, deal with
- funnelling and then go to it's true continuation point */
- }
-
- new_thread->state &= ~(TH_STACK_HANDOFF | TH_UNINT);
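+ /* Bound threads trace (uintptr_t)-2 in place of a last-processor hint */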
+ SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
+ }
+#else /* !__SMP__ */
+ /* Only one processor to choose */
+ assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
+ processor = master_processor;
+ pset = processor->processor_set;
+ pset_lock(pset);
+#endif /* !__SMP__ */
/*
- * Set up ast context of new thread and switch to its timer.
- */
- new_thread->last_processor = current_processor();
- ast_context(new_thread->top_act, cpu_number());
- timer_switch(&new_thread->system_timer);
- assert(thread_runnable(new_thread));
-
- /*
- * N.B. On return from the call to switch_context, 'old_thread'
- * points at the thread that yielded to us. Unfortunately, at
- * this point, there are no simple_locks held, so if we are preempted
- * before the call to thread_dispatch blocks preemption, it is
- * possible for 'old_thread' to terminate, leaving us with a
- * stale thread pointer.
+ * Dispatch the thread on the chosen processor.
+ * TODO: This should be based on sched_mode, not sched_pri
*/
- disable_preemption();
-
- thread_unlock(new_thread);
-
- counter_always(c_thread_invoke_csw++);
- current_task()->csw++;
+ if (thread->sched_pri >= BASEPRI_RTQUEUES)
+ realtime_setrun(processor, thread);
+ else
+ processor_setrun(processor, thread, options);
+}
-
- thread_lock(old_thread);
- old_thread->reason = reason;
- assert(old_thread->runq == RUN_QUEUE_NULL);
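+/*
+ * task_choose_pset:
+ *
+ * Return the pset following the task's pset hint, or
+ * PROCESSOR_SET_NULL if the task has no hint.
+ */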
+processor_set_t
+task_choose_pset(
+ task_t task)
+{
+ processor_set_t pset = task->pset_hint;
- if (continuation != (void (*)(void))0)
- old_thread->continuation = continuation;
+ if (pset != PROCESSOR_SET_NULL)
+ pset = choose_next_pset(pset);
- /* Indicate to sched policy that old thread has stopped execution */
- policy = &sched_policy[old_thread->policy];
- /*** ??? maybe use a macro -- ***/
- sfr = policy->sp_ops.sp_thread_done(policy, old_thread);
- assert(sfr == SF_SUCCESS);
- thread_unlock(old_thread);
+ return (pset);
+}
- /*
- * switch_context is machine-dependent. It does the
- * machine-dependent components of a context-switch, like
- * changing address spaces. It updates active_threads.
- */
- old_thread = switch_context(old_thread, continuation, new_thread);
-
- /* Now on new thread's stack. Set a local variable to refer to it. */
- new_thread = __current_thread();
- assert(old_thread != new_thread);
+/*
+ * Check for a preemption point in
+ * the current context.
+ *
+ * Called at splsched with thread locked.
+ */
+ast_t
+csw_check(
+ processor_t processor,
+ ast_t check_reason)
+{
+ processor_set_t pset = processor->processor_set;
+ ast_t result;
- assert(thread_runnable(new_thread));
+ pset_lock(pset);
- thread_lock(new_thread);
- assert(thread_runnable(new_thread));
- /* Indicate to sched policy that new thread has started execution */
- policy = &sched_policy[new_thread->policy];
- /*** ??? maybe use a macro -- rkc, 1/4/96 ***/
- sfr = policy->sp_ops.sp_thread_begin(policy, new_thread);
- assert(sfr == SF_SUCCESS);
- thread_unlock(new_thread);
+ /* If we were sent a remote AST and interrupted a running processor, acknowledge it here with pset lock held */
+ pset->pending_AST_cpu_mask &= ~(1ULL << processor->cpu_id);
- /*
- * We're back. Now old_thread is the thread that resumed
- * us, and we have to dispatch it.
- */
- /* CHECKME! */
-// Code from OSF in Grenoble deleted the following fields. They were
-// used in HPPA and 386 code, but not in the PPC for other than
-// just setting and resetting. They didn't delete these lines from
-// the MACH_RT builds, though, causing compile errors. I'm going
-// to make a wild guess and assume we can just delete these.
-#if 0
- if (old_thread->preempt == TH_NOT_PREEMPTABLE) {
- /*
- * Mark that we have been really preempted
- */
- old_thread->preempt = TH_PREEMPTED;
- }
-#endif
- thread_dispatch(old_thread);
- enable_preemption();
+ result = csw_check_locked(processor, pset, check_reason);
- /* if we get here and 'continuation' is set that means the
- * switch_context() path returned and did not call out
- * to the continuation. we will do it manually here */
- if (continuation) {
- call_continuation(continuation);
- /* NOTREACHED */
- }
+ pset_unlock(pset);
- return TRUE;
+ return result;
}
/*
- * thread_continue:
- *
- * Called when the launching a new thread, at splsched();
+ * Check for preemption at splsched with
+ * pset and thread locked
*/
-void
-thread_continue(
- register thread_t old_thread)
+ast_t
+csw_check_locked(
+ processor_t processor,
+ processor_set_t pset __unused,
+ ast_t check_reason)
{
- register thread_t self;
- register void (*continuation)();
- sched_policy_t *policy;
- sf_return_t sfr;
+ ast_t result;
+ thread_t thread = processor->active_thread;
- self = current_thread();
-
- /*
- * We must dispatch the old thread and then
- * call the current thread's continuation.
- * There might not be an old thread, if we are
- * the first thread to run on this processor.
- */
- if (old_thread != THREAD_NULL) {
- thread_dispatch(old_thread);
-
- thread_lock(self);
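+ /*
+ * A waiting realtime thread requests a preemption. The preemption
+ * is urgent unless the running thread is itself realtime and has
+ * already consumed its first timeslice.
+ */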
+ if (processor->first_timeslice) {
+ if (rt_runq.count > 0)
+ return (check_reason | AST_PREEMPT | AST_URGENT);
+ }
+ else {
+ if (rt_runq.count > 0) {
+ if (BASEPRI_RTQUEUES > processor->current_pri)
+ return (check_reason | AST_PREEMPT | AST_URGENT);
+ else
+ return (check_reason | AST_PREEMPT);
+ }
+ }
- /* Get pointer to scheduling policy "object" */
- policy = &sched_policy[self->policy];
+ result = SCHED(processor_csw_check)(processor);
+ if (result != AST_NONE)
+ return (check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE));
- /* Indicate to sched policy that new thread has started execution */
- /*** ??? maybe use a macro -- rkc, 1/4/96 ***/
- sfr = policy->sp_ops.sp_thread_begin(policy,self);
- assert(sfr == SF_SUCCESS);
- } else {
- thread_lock(self);
- }
-
- continuation = self->continuation;
- self->continuation = (void (*)(void))0;
- thread_unlock(self);
+#if __SMP__
/*
- * N.B. - the following is necessary, since thread_invoke()
- * inhibits preemption on entry and reenables before it
- * returns. Unfortunately, the first time a newly-created
- * thread executes, it magically appears here, and never
- * executes the enable_preemption() call in thread_invoke().
+ * If the current thread is running on a processor that is no longer recommended,
+ * gently (non-urgently) get to a point where it can block, at which point
+ * thread_select() should try to idle the processor and re-dispatch the thread to
+ * a recommended processor.
*/
- enable_preemption();
-
- if (self->funnel_state & TH_FN_REFUNNEL) {
- kern_return_t save_wait_result;
- self->funnel_state = 0;
- save_wait_result = self->wait_result;
- KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
- funnel_lock(self->funnel_lock);
- KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
- self->wait_result = save_wait_result;
- self->funnel_state = TH_FN_OWNED;
- }
- spllo();
-
- assert(continuation);
- (*continuation)();
- /*NOTREACHED*/
-}
+ if (!processor->is_recommended)
+ return (check_reason | AST_PREEMPT);
-#if MACH_LDEBUG || MACH_KDB
-
-#define THREAD_LOG_SIZE 300
-
-struct t64 {
- unsigned long h;
- unsigned long l;
-};
-
-struct {
- struct t64 stamp;
- thread_t thread;
- long info1;
- long info2;
- long info3;
- char * action;
-} thread_log[THREAD_LOG_SIZE];
+ /*
+ * Even though we could continue executing on this processor, a
+ * secondary SMT core should try to shed load to another primary core.
+ *
+ * TODO: Should this do the same check that thread_select() does? i.e.,
+ * if no bound threads target this processor and idle primaries exist, preempt.
+ * The case of runnable RT threads is already handled above.
+ * Consider Capri in this scenario.
+ *
+ * if (!SCHED(processor_bound_count)(processor) && !queue_empty(&pset->idle_queue))
+ *
+ * TODO: Alternatively - check if only primary is idle, or check if primary's pri is lower than mine.
+ */
-int thread_log_index;
+ if (processor->current_pri < BASEPRI_RTQUEUES &&
+ processor->processor_primary != processor)
+ return (check_reason | AST_PREEMPT);
+#endif
-void check_thread_time(long n);
+ if (thread->state & TH_SUSP)
+ return (check_reason | AST_PREEMPT);
+#if CONFIG_SCHED_SFI
+ /*
+ * Current thread may not need to be preempted, but maybe needs
+ * an SFI wait?
+ */
+ result = sfi_thread_needs_ast(thread, NULL);
+ if (result != AST_NONE)
+ return (check_reason | result);
+#endif
-int check_thread_time_crash;
+ return (AST_NONE);
+}
-#if 0
+/*
+ * set_sched_pri:
+ *
+ * Set the scheduled priority of the specified thread.
+ *
+ * This may cause the thread to change queues.
+ *
+ * Thread must be locked.
+ */
void
-check_thread_time(long us)
+set_sched_pri(
+ thread_t thread,
+ int priority)
{
- struct t64 temp;
-
- if (!check_thread_time_crash)
+ thread_t cthread = current_thread();
+ boolean_t is_current_thread = (thread == cthread) ? TRUE : FALSE;
+ int curgency, nurgency;
+ uint64_t urgency_param1, urgency_param2;
+ boolean_t removed_from_runq = FALSE;
+
+ /* If we're already at this priority, no need to mess with the runqueue */
+ if (priority == thread->sched_pri)
return;
- temp = thread_log[0].stamp;
- cyctm05_diff (&thread_log[1].stamp, &thread_log[0].stamp, &temp);
+ if (is_current_thread) {
+ assert(thread->runq == PROCESSOR_NULL);
+ curgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
+ } else {
+ removed_from_runq = thread_run_queue_remove(thread);
+ }
- if (temp.l >= us && thread_log[1].info != 0x49) /* HACK!!! */
- panic ("check_thread_time");
-}
-#endif
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+ (uintptr_t)thread_tid(thread),
+ thread->base_pri,
+ thread->sched_pri,
+ 0, /* eventually, 'reason' */
+ 0);
-void
-log_thread_action(char * action, long info1, long info2, long info3)
-{
- int i;
- spl_t x;
- static unsigned int tstamp;
+ thread->sched_pri = priority;
- x = splhigh();
+ if (is_current_thread) {
+ nurgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
+ /*
+ * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
+ * class alterations from user space to occur relatively infrequently, hence
+ * those are lazily handled. QoS classes have distinct priority bands, and QoS
+ * inheritance is expected to involve priority changes.
+ */
+ if (nurgency != curgency) {
+ thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread);
+ machine_thread_going_on_core(thread, nurgency, 0);
+ }
+ }
- for (i = THREAD_LOG_SIZE-1; i > 0; i--) {
- thread_log[i] = thread_log[i-1];
+ /* TODO: Should this be TAILQ if it went down, HEADQ if it went up? */
+ if (removed_from_runq)
+ thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
+ else if (thread->state & TH_RUN) {
+ processor_t processor = thread->last_processor;
+
+ if (is_current_thread) {
+ ast_t preempt;
+
+ processor->current_pri = priority;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
+ if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
+ ast_on(preempt);
+ } else if (processor != PROCESSOR_NULL && processor->active_thread == thread)
+ cause_ast_check(processor);
}
+}
- thread_log[0].stamp.h = 0;
- thread_log[0].stamp.l = tstamp++;
- thread_log[0].thread = current_thread();
- thread_log[0].info1 = info1;
- thread_log[0].info2 = info2;
- thread_log[0].info3 = info3;
- thread_log[0].action = action;
-/* strcpy (&thread_log[0].action[0], action);*/
+/*
+ * thread_run_queue_remove_for_handoff
+ *
+ * Pull a thread or its (recursive) push target out of the runqueue
+ * so that it is ready for thread_run()
+ *
+ * Called at splsched
+ *
+ * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
+ * This may be different than the thread that was passed in.
+ */
+thread_t
+thread_run_queue_remove_for_handoff(thread_t thread)
+{
- splx(x);
-}
-#endif /* MACH_LDEBUG || MACH_KDB */
+ thread_t pulled_thread = THREAD_NULL;
-#if MACH_KDB
-#include <ddb/db_output.h>
-void db_show_thread_log(void);
+ thread_lock(thread);
-void
-db_show_thread_log(void)
-{
- int i;
+ /*
+ * Check that the thread is not bound
+ * to a different processor, and that realtime
+ * is not involved.
+ *
+ * Next, pull it off its run queue. If it
+ * doesn't come off, it's not eligible.
+ */
- db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ",
- " Info3 ", " Timestamp ", "Action");
+ processor_t processor = current_processor();
+ if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES &&
+ (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) {
- for (i = 0; i < THREAD_LOG_SIZE; i++) {
- db_printf ("%08x %08x %08x %08x %08x/%08x %s\n",
- thread_log[i].thread,
- thread_log[i].info1,
- thread_log[i].info2,
- thread_log[i].info3,
- thread_log[i].stamp.h,
- thread_log[i].stamp.l,
- thread_log[i].action);
+ if (thread_run_queue_remove(thread))
+ pulled_thread = thread;
}
+
+ thread_unlock(thread);
+
+ return pulled_thread;
}
-#endif /* MACH_KDB */
/*
- * thread_block_reason:
+ * thread_run_queue_remove:
*
- * Block the current thread. If the thread is runnable
- * then someone must have woken it up between its request
- * to sleep and now. In this case, it goes back on a
- * run queue.
+ * Remove a thread from its current run queue and
+ * return TRUE if successful.
*
- * If a continuation is specified, then thread_block will
- * attempt to discard the thread's kernel stack. When the
- * thread resumes, it will execute the continuation function
- * on a new kernel stack.
+ * Thread must be locked.
+ *
+ * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
+ * run queues because the caller locked the thread. Otherwise
+ * the thread is on a run queue, but could be chosen for dispatch
+ * and removed by another processor under a different lock, which
+ * will set thread->runq to PROCESSOR_NULL.
+ *
+ * Hence the thread select path must not rely on anything that could
+ * be changed under the thread lock after calling this function,
+ * most importantly thread->sched_pri.
*/
-counter(mach_counter_t c_thread_block_calls = 0;)
-
-int
-thread_block_reason(
- void (*continuation)(void),
- int reason)
+boolean_t
+thread_run_queue_remove(
+ thread_t thread)
{
- register thread_t thread = current_thread();
- register processor_t myprocessor;
- register thread_t new_thread;
- spl_t s;
+ boolean_t removed = FALSE;
+ processor_t processor = thread->runq;
- counter(++c_thread_block_calls);
-
- check_simple_locks();
+ if ((thread->state & (TH_RUN|TH_WAIT)) == TH_WAIT) {
+ /* Thread isn't runnable */
+ assert(thread->runq == PROCESSOR_NULL);
+ return FALSE;
+ }
- machine_clock_assist();
+ if (processor == PROCESSOR_NULL) {
+ /*
+ * The thread is either not on the runq,
+ * or is in the midst of being removed from the runq.
+ *
+ * runq is set to NULL under the pset lock, not the thread
+ * lock, so the thread may still be in the process of being dequeued
+ * from the runq. It will wait in thread_invoke() for the thread lock to be
+ * dropped.
+ */
- s = splsched();
+ return FALSE;
+ }
- if ((thread->funnel_state & TH_FN_OWNED) && !(reason & AST_PREEMPT)) {
- thread->funnel_state = TH_FN_REFUNNEL;
- KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, thread->funnel_lock, 2, 0, 0, 0);
- funnel_unlock(thread->funnel_lock);
+ if (thread->sched_pri < BASEPRI_RTQUEUES) {
+ return SCHED(processor_queue_remove)(processor, thread);
}
- myprocessor = current_processor();
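+ /* Realtime threads live on the global RT run queue, protected by rt_lock */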
+ rt_lock_lock();
- thread_lock(thread);
- if (thread->state & TH_ABORT)
- clear_wait_internal(thread, THREAD_INTERRUPTED);
+ if (thread->runq != PROCESSOR_NULL) {
+ /*
+ * Thread is on the RT run queue and we have a lock on
+ * that run queue.
+ */
- /* Unconditionally remove either | both */
- ast_off(AST_QUANTUM|AST_BLOCK|AST_URGENT);
+ assert(thread->runq == THREAD_ON_RT_RUNQ);
- new_thread = thread_select(myprocessor);
- assert(new_thread);
- assert(thread_runnable(new_thread));
- thread_unlock(thread);
- while (!thread_invoke(thread, new_thread, reason, continuation)) {
- thread_lock(thread);
- new_thread = thread_select(myprocessor);
- assert(new_thread);
- assert(thread_runnable(new_thread));
- thread_unlock(thread);
- }
+ remqueue((queue_entry_t)thread);
+ SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);
+ rt_runq.count--;
- if (thread->funnel_state & TH_FN_REFUNNEL) {
- kern_return_t save_wait_result;
+ thread->runq = PROCESSOR_NULL;
- save_wait_result = thread->wait_result;
- thread->funnel_state = 0;
- KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
- funnel_lock(thread->funnel_lock);
- KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
- thread->funnel_state = TH_FN_OWNED;
- thread->wait_result = save_wait_result;
+ removed = TRUE;
}
- splx(s);
-
- return thread->wait_result;
-}
+ rt_lock_unlock();
-/*
- * thread_block:
- *
- * Now calls thread_block_reason() which forwards the
- * the reason parameter to thread_invoke() so it can
- * do the right thing if the thread's quantum expired.
- */
-int
-thread_block(
- void (*continuation)(void))
-{
- return thread_block_reason(continuation, 0);
+ return (removed);
}
/*
- * thread_run:
+ * Put the thread back where it goes after a thread_run_queue_remove
*
- * Switch directly from the current thread to a specified
- * thread. Both the current and new threads must be
- * runnable.
+ * Thread must have been removed while holding the same thread lock
*
- * Assumption:
- * at splsched.
- */
-int
-thread_run(
- thread_t old_thread,
- void (*continuation)(void),
- thread_t new_thread)
-{
- while (!thread_invoke(old_thread, new_thread, 0, continuation)) {
- register processor_t myprocessor = current_processor();
- thread_lock(old_thread);
- new_thread = thread_select(myprocessor);
- thread_unlock(old_thread);
- }
- return old_thread->wait_result;
-}
-
-/*
- * Dispatches a running thread that is not on a runq.
- * Called at splsched.
+ * Called with the thread locked, at splsched
*/
void
-thread_dispatch(
- register thread_t thread)
+thread_run_queue_reinsert(thread_t thread, integer_t options)
{
- sched_policy_t *policy;
- sf_return_t sfr;
+ assert(thread->runq == PROCESSOR_NULL);
- /*
- * If we are discarding the thread's stack, we must do it
- * before the thread has a chance to run.
- */
- wake_lock(thread);
- thread_lock(thread);
+ assert(thread->state & (TH_RUN));
+ thread_setrun(thread, options);
-#ifndef i386
- /* no continuations on i386 for now */
- if (thread->continuation != (void (*)())0) {
- assert((thread->state & TH_STACK_STATE) == 0);
- thread->state |= TH_STACK_HANDOFF;
- stack_free(thread);
- if (thread->top_act) {
- act_machine_sv_free(thread->top_act);
- }
- }
-#endif
+}
- switch (thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {
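+/*
+ * sys_override_cpu_throttle:
+ *
+ * Enable or disable classification of low-priority threads as
+ * THREAD_URGENCY_BACKGROUND by thread_get_urgency() below.
+ */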
+void
+sys_override_cpu_throttle(int flag)
+{
+ if (flag == CPU_THROTTLE_ENABLE)
+ cpu_throttle_enabled = 1;
+ if (flag == CPU_THROTTLE_DISABLE)
+ cpu_throttle_enabled = 0;
+}
- case TH_RUN | TH_UNINT:
- case TH_RUN:
- /*
- * No reason to stop. Put back on a run queue.
- */
- /* Leave enqueueing thread up to scheduling policy */
- policy = &sched_policy[thread->policy];
- /*** ??? maybe use a macro ***/
- sfr = policy->sp_ops.sp_thread_dispatch(policy, thread);
- assert(sfr == SF_SUCCESS);
- break;
-
- case TH_RUN | TH_WAIT | TH_UNINT:
- case TH_RUN | TH_WAIT:
- thread->sleep_stamp = sched_tick;
- /* fallthrough */
- case TH_WAIT: /* this happens! */
-
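+/*
+ * thread_get_urgency:
+ *
+ * Classify the urgency of a thread, returning a THREAD_URGENCY_*
+ * value along with mode-specific parameters in arg1/arg2.
+ */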
+int
+thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
+{
+ if (thread == NULL || (thread->state & TH_IDLE)) {
+ *arg1 = 0;
+ *arg2 = 0;
+
+ return (THREAD_URGENCY_NONE);
+ } else if (thread->sched_mode == TH_MODE_REALTIME) {
+ *arg1 = thread->realtime.period;
+ *arg2 = thread->realtime.deadline;
+
+ return (THREAD_URGENCY_REAL_TIME);
+ } else if (cpu_throttle_enabled &&
+ ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
/*
- * Waiting
+ * Background urgency is applied when the thread's priority is MAXPRI_THROTTLE or lower and the thread is not promoted.
+ * TODO: Use TH_SFLAG_THROTTLED instead?
*/
- thread->state &= ~TH_RUN;
- if (thread->state & TH_TERMINATE)
- thread_reaper_enqueue(thread);
-
- if (thread->wake_active) {
- thread->wake_active = FALSE;
- thread_unlock(thread);
- wake_unlock(thread);
- thread_wakeup((event_t)&thread->wake_active);
- return;
- }
- break;
+ *arg1 = thread->sched_pri;
+ *arg2 = thread->base_pri;
- case TH_RUN | TH_IDLE:
- /*
- * Drop idle thread -- it is already in
- * idle_thread_array.
+ return (THREAD_URGENCY_BACKGROUND);
+ } else {
+ /* For otherwise unclassified threads, report throughput QoS
+ * parameters
*/
- break;
-
- default:
- panic("State 0x%x \n",thread->state);
+ *arg1 = thread->effective_policy.t_through_qos;
+ *arg2 = thread->task->effective_policy.t_through_qos;
+
+ return (THREAD_URGENCY_NORMAL);
}
- thread_unlock(thread);
- wake_unlock(thread);
}
+
/*
- * Enqueue thread on run queue. Thread must be locked,
- * and not already be on a run queue.
+ * This is the processor idle loop, which just looks for other threads
+ * to execute. Dedicated processor idle threads invoke this without
+ * supplying a current thread; otherwise, the supplied current thread
+ * idles here in place, without an asserted wait state.
+ *
+ * Returns the next thread to execute if one was dispatched directly.
*/
-int
-run_queue_enqueue(
- register run_queue_t rq,
- register thread_t thread,
- boolean_t tail)
+
+#if 0
+#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
+#else
+#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
+#endif
+
+thread_t
+processor_idle(
+ thread_t thread,
+ processor_t processor)
{
- register int whichq;
- int oldrqcount;
-
- whichq = thread->sched_pri;
- assert(whichq >= MINPRI && whichq <= MAXPRI);
+ processor_set_t pset = processor->processor_set;
+ thread_t new_thread;
+ int state;
+ (void)splsched();
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START,
+ (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
+
+ SCHED_STATS_CPU_IDLE_START(processor);
+
+ timer_switch(&PROCESSOR_DATA(processor, system_state),
+ mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
+ PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
+
+ while (1) {
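+ /*
+ * Remain in the idle loop until the processor is dispatched or
+ * shut down, a remote AST is pending, or runnable work appears
+ * that this processor should handle.
+ */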
+ if (processor->state != PROCESSOR_IDLE) /* unsafe, but worst case we loop around once */
+ break;
+ if (pset->pending_AST_cpu_mask & (1ULL << processor->cpu_id))
+ break;
+ if (processor->is_recommended) {
+ if (rt_runq.count)
+ break;
+ } else {
+ if (SCHED(processor_bound_count)(processor))
+ break;
+ }
- simple_lock(&rq->lock); /* lock the run queue */
- assert(thread->runq == RUN_QUEUE_NULL);
- if (tail)
- enqueue_tail(&rq->queues[whichq], (queue_entry_t)thread);
- else
- enqueue_head(&rq->queues[whichq], (queue_entry_t)thread);
+#if CONFIG_SCHED_IDLE_IN_PLACE
+ if (thread != THREAD_NULL) {
+ /* Did the idle-in-place thread wake up? */
+ if ((thread->state & (TH_WAIT|TH_SUSP)) != TH_WAIT || thread->wake_active)
+ break;
+ }
+#endif
- setbit(MAXPRI - whichq, rq->bitmap);
- if (whichq > rq->highq)
- rq->highq = whichq;
+ IDLE_KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -1, 0);
- oldrqcount = rq->count++;
- thread->runq = rq;
- thread->whichq = whichq;
-#if DEBUG
- thread_check(thread, rq);
-#endif /* DEBUG */
- simple_unlock(&rq->lock);
+ machine_track_platform_idle(TRUE);
- return (oldrqcount);
-}
+ machine_idle();
-/*
- * thread_setrun:
- *
- * Make thread runnable; dispatch directly onto an idle processor
- * if possible. Else put on appropriate run queue (processor
- * if bound, else processor set. Caller must have lock on thread.
- * This is always called at splsched.
- * The tail parameter, if TRUE || TAIL_Q, indicates that the
- * thread should be placed at the tail of the runq. If
- * FALSE || HEAD_Q the thread will be placed at the head of the
- * appropriate runq.
- */
-void
-thread_setrun(
- register thread_t new_thread,
- boolean_t may_preempt,
- boolean_t tail)
-{
- register processor_t processor;
- register run_queue_t runq;
- register processor_set_t pset;
- thread_t thread;
- ast_t ast_flags = AST_BLOCK;
+ machine_track_platform_idle(FALSE);
- mp_disable_preemption();
+ (void)splsched();
- assert(!(new_thread->state & TH_SWAPPED_OUT));
- assert(thread_runnable(new_thread));
-
- /*
- * Update priority if needed.
- */
- if (new_thread->sched_stamp != sched_tick)
- update_priority(new_thread);
+ IDLE_KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -2, 0);
- if (new_thread->policy & (POLICY_FIFO|POLICY_RR)) {
- if ( new_thread->sched_pri >= (MAXPRI_KERNBAND - 2) &&
- kernel_preemption_mode == KERNEL_PREEMPT )
- ast_flags |= AST_URGENT;
+ if (!SCHED(processor_queue_empty)(processor)) {
+ /* Secondary SMT processors respond only to directed wakeups.
+ * Some platforms induce 'spurious' SMT wakeups, which we ignore here.
+ */
+ if (processor->processor_primary == processor)
+ break;
+ }
}
-
- assert(new_thread->runq == RUN_QUEUE_NULL);
- /*
- * Try to dispatch the thread directly onto an idle processor.
- */
- if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) {
- /*
- * Not bound, any processor in the processor set is ok.
- */
- pset = new_thread->processor_set;
- if (pset->idle_count > 0) {
- simple_lock(&pset->idle_lock);
- if (pset->idle_count > 0) {
- processor = (processor_t) queue_first(&pset->idle_queue);
- queue_remove(&(pset->idle_queue), processor, processor_t,
- processor_queue);
- pset->idle_count--;
- processor->next_thread = new_thread;
- processor->state = PROCESSOR_DISPATCHING;
- simple_unlock(&pset->idle_lock);
- if(processor->slot_num != cpu_number())
- machine_signal_idle(processor);
- mp_enable_preemption();
- return;
- }
- simple_unlock(&pset->idle_lock);
- }
-
+ timer_switch(&PROCESSOR_DATA(processor, idle_state),
+ mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
+ PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
- /*
- * Preempt check
- */
- runq = &pset->runq;
- thread = current_thread();
- processor = current_processor();
- if ( may_preempt &&
- pset == processor->processor_set &&
- thread->sched_pri < new_thread->sched_pri ) {
- /*
- * XXX if we have a non-empty local runq or are
- * XXX running a bound thread, ought to check for
- * XXX another cpu running lower-pri thread to preempt.
- */
- /*
- * Turn off first_quantum to allow csw.
- */
- processor->first_quantum = FALSE;
+ pset_lock(pset);
- ast_on(ast_flags);
- }
+ /* If we were sent a remote AST and came out of idle, acknowledge it here with pset lock held */
+ pset->pending_AST_cpu_mask &= ~(1ULL << processor->cpu_id);
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+ pset->pending_deferred_AST_cpu_mask &= ~(1ULL << processor->cpu_id);
+#endif
+ state = processor->state;
+ if (state == PROCESSOR_DISPATCHING) {
/*
- * Put us on the end of the runq, if we are not preempting
- * or the guy we are preempting.
+ * Common case -- cpu dispatched.
*/
- run_queue_enqueue(runq, new_thread, tail);
- }
- else {
- /*
- * Bound, can only run on bound processor. Have to lock
- * processor here because it may not be the current one.
- */
- if (processor->state == PROCESSOR_IDLE) {
- simple_lock(&processor->lock);
- pset = processor->processor_set;
- simple_lock(&pset->idle_lock);
- if (processor->state == PROCESSOR_IDLE) {
- queue_remove(&pset->idle_queue, processor,
- processor_t, processor_queue);
- pset->idle_count--;
- processor->next_thread = new_thread;
- processor->state = PROCESSOR_DISPATCHING;
- simple_unlock(&pset->idle_lock);
- simple_unlock(&processor->lock);
- if(processor->slot_num != cpu_number())
- machine_signal_idle(processor);
- mp_enable_preemption();
- return;
- }
- simple_unlock(&pset->idle_lock);
- simple_unlock(&processor->lock);
- }
-
- /*
- * Cause ast on processor if processor is on line, and the
- * currently executing thread is not bound to that processor
- * (bound threads have implicit priority over non-bound threads).
- * We also avoid sending the AST to the idle thread (if it got
- * scheduled in the window between the 'if' above and here),
- * since the idle_thread is bound.
- */
- runq = &processor->runq;
- thread = current_thread();
- if (processor == current_processor()) {
- if ( thread->bound_processor == PROCESSOR_NULL ||
- thread->sched_pri < new_thread->sched_pri ) {
- processor->first_quantum = FALSE;
- ast_on(ast_flags);
- }
+ new_thread = processor->next_thread;
+ processor->next_thread = THREAD_NULL;
+ processor->state = PROCESSOR_RUNNING;
- run_queue_enqueue(runq, new_thread, tail);
- }
- else {
- thread = cpu_data[processor->slot_num].active_thread;
- if ( run_queue_enqueue(runq, new_thread, tail) == 0 &&
- processor->state != PROCESSOR_OFF_LINE &&
- thread && thread->bound_processor != processor )
- cause_ast_check(processor);
- }
- }
+ if ((new_thread != THREAD_NULL) && (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) ||
+ (rt_runq.count > 0)) ) {
+ /* Something higher priority has popped up on the runqueue - redispatch this thread elsewhere */
+ processor->current_pri = IDLEPRI;
+ processor->current_thmode = TH_MODE_FIXED;
+ processor->current_sfi_class = SFI_CLASS_KERNEL;
+ processor->deadline = UINT64_MAX;
- mp_enable_preemption();
-}
+ pset_unlock(pset);
-/*
- * set_pri:
- *
- * Set the priority of the specified thread to the specified
- * priority. This may cause the thread to change queues.
- *
- * The thread *must* be locked by the caller.
- */
-void
-set_pri(
- thread_t thread,
- int pri,
- boolean_t resched)
-{
- register struct run_queue *rq;
+ thread_lock(new_thread);
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq.count, 0, 0);
+ thread_setrun(new_thread, SCHED_HEADQ);
+ thread_unlock(new_thread);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
+ (uintptr_t)thread_tid(thread), state, 0, 0, 0);
+
+ return (THREAD_NULL);
+ }
+
+ pset_unlock(pset);
- rq = rem_runq(thread);
- assert(thread->runq == RUN_QUEUE_NULL);
- thread->sched_pri = pri;
- if (rq != RUN_QUEUE_NULL) {
- if (resched)
- thread_setrun(thread, TRUE, TAIL_Q);
- else
- run_queue_enqueue(rq, thread, TAIL_Q);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
+ (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);
+
+ return (new_thread);
}
-}
+ else
+ if (state == PROCESSOR_IDLE) {
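+ /* Awoke without being dispatched: requeue from the idle queue to the active queue */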
+ remqueue((queue_entry_t)processor);
+
+ processor->state = PROCESSOR_RUNNING;
+ processor->current_pri = IDLEPRI;
+ processor->current_thmode = TH_MODE_FIXED;
+ processor->current_sfi_class = SFI_CLASS_KERNEL;
+ processor->deadline = UINT64_MAX;
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
+ }
+ else
+ if (state == PROCESSOR_SHUTDOWN) {
+ /*
+ * Going off-line. Force a
+ * reschedule.
+ */
+ if ((new_thread = processor->next_thread) != THREAD_NULL) {
+ processor->next_thread = THREAD_NULL;
+ processor->current_pri = IDLEPRI;
+ processor->current_thmode = TH_MODE_FIXED;
+ processor->current_sfi_class = SFI_CLASS_KERNEL;
+ processor->deadline = UINT64_MAX;
-/*
- * rem_runq:
- *
- * Remove a thread from its run queue.
- * The run queue that the process was on is returned
- * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
- * before calling this routine. Unusual locking protocol on runq
- * field in thread structure makes this code interesting; see thread.h.
- */
-run_queue_t
-rem_runq(
- thread_t thread)
-{
- register struct run_queue *rq;
+ pset_unlock(pset);
+
+ thread_lock(new_thread);
+ thread_setrun(new_thread, SCHED_HEADQ);
+ thread_unlock(new_thread);
- rq = thread->runq;
- /*
- * If rq is RUN_QUEUE_NULL, the thread will stay out of the
- * run_queues because the caller locked the thread. Otherwise
- * the thread is on a runq, but could leave.
- */
- if (rq != RUN_QUEUE_NULL) {
- simple_lock(&rq->lock);
- if (rq == thread->runq) {
- /*
- * Thread is in a runq and we have a lock on
- * that runq.
- */
-#if DEBUG
- thread_check(thread, rq);
-#endif /* DEBUG */
- remqueue(&rq->queues[0], (queue_entry_t)thread);
- rq->count--;
-
- if (queue_empty(rq->queues + thread->sched_pri)) {
- /* update run queue status */
- if (thread->sched_pri != IDLEPRI)
- clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
- rq->highq = MAXPRI - ffsbit(rq->bitmap);
- }
- thread->runq = RUN_QUEUE_NULL;
- simple_unlock(&rq->lock);
- }
- else {
- /*
- * The thread left the runq before we could
- * lock the runq. It is not on a runq now, and
- * can't move again because this routine's
- * caller locked the thread.
- */
- assert(thread->runq == RUN_QUEUE_NULL);
- simple_unlock(&rq->lock);
- rq = RUN_QUEUE_NULL;
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
+ (uintptr_t)thread_tid(thread), state, 0, 0, 0);
+
+ return (THREAD_NULL);
}
}
- return (rq);
-}
+ pset_unlock(pset);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
+ (uintptr_t)thread_tid(thread), state, 0, 0, 0);
+
+ return (THREAD_NULL);
+}
/*
- * choose_thread:
- *
- * Choose a thread to execute. The thread chosen is removed
- * from its run queue. Note that this requires only that the runq
- * lock be held.
- *
- * Strategy:
- * Check processor runq first; if anything found, run it.
- * Else check pset runq; if nothing found, return idle thread.
- *
- * Second line of strategy is implemented by choose_pset_thread.
- * This is only called on processor startup and when thread_block
- * thinks there's something in the processor runq.
+ * Each processor has a dedicated thread which
+ * executes the idle loop when there is no suitable
+ * previous context.
*/
-thread_t
-choose_thread(
- processor_t myprocessor)
+void
+idle_thread(void)
{
- thread_t thread;
- register queue_t q;
- register run_queue_t runq;
- processor_set_t pset;
-
- runq = &myprocessor->runq;
- pset = myprocessor->processor_set;
-
- simple_lock(&runq->lock);
- if (runq->count > 0 && runq->highq >= pset->runq.highq) {
- q = runq->queues + runq->highq;
-#if MACH_ASSERT
- if (!queue_empty(q)) {
-#endif /*MACH_ASSERT*/
- thread = (thread_t)q->next;
- ((queue_entry_t)thread)->next->prev = q;
- q->next = ((queue_entry_t)thread)->next;
- thread->runq = RUN_QUEUE_NULL;
- runq->count--;
- if (queue_empty(q)) {
- if (runq->highq != IDLEPRI)
- clrbit(MAXPRI - runq->highq, runq->bitmap);
- runq->highq = MAXPRI - ffsbit(runq->bitmap);
- }
- simple_unlock(&runq->lock);
- return (thread);
-#if MACH_ASSERT
- }
- panic("choose_thread");
-#endif /*MACH_ASSERT*/
+ processor_t processor = current_processor();
+ thread_t new_thread;
+
+ new_thread = processor_idle(THREAD_NULL, processor);
+ if (new_thread != THREAD_NULL) {
+ thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
/*NOTREACHED*/
}
- simple_unlock(&runq->lock);
- simple_lock(&pset->runq.lock);
- return (choose_pset_thread(myprocessor, pset));
+ thread_block((thread_continue_t)idle_thread);
+ /*NOTREACHED*/
}
-
-/*
- * choose_pset_thread: choose a thread from processor_set runq or
- * set processor idle and choose its idle thread.
- *
- * Caller must be at splsched and have a lock on the runq. This
- * lock is released by this routine. myprocessor is always the current
- * processor, and pset must be its processor set.
- * This routine chooses and removes a thread from the runq if there
- * is one (and returns it), else it sets the processor idle and
- * returns its idle thread.
- */
-thread_t
-choose_pset_thread(
- register processor_t myprocessor,
- processor_set_t pset)
+kern_return_t
+idle_thread_create(
+ processor_t processor)
{
- register run_queue_t runq;
- register thread_t thread;
- register queue_t q;
+ kern_return_t result;
+ thread_t thread;
+ spl_t s;
- runq = &pset->runq;
- if (runq->count > 0) {
- q = runq->queues + runq->highq;
-#if MACH_ASSERT
- if (!queue_empty(q)) {
-#endif /*MACH_ASSERT*/
- thread = (thread_t)q->next;
- ((queue_entry_t)thread)->next->prev = q;
- q->next = ((queue_entry_t)thread)->next;
- thread->runq = RUN_QUEUE_NULL;
- runq->count--;
- if (queue_empty(q)) {
- if (runq->highq != IDLEPRI)
- clrbit(MAXPRI - runq->highq, runq->bitmap);
- runq->highq = MAXPRI - ffsbit(runq->bitmap);
- }
- simple_unlock(&runq->lock);
- return (thread);
-#if MACH_ASSERT
- }
- panic("choose_pset_thread");
-#endif /*MACH_ASSERT*/
- /*NOTREACHED*/
- }
- simple_unlock(&runq->lock);
+ result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
+ if (result != KERN_SUCCESS)
+ return (result);
- /*
- * Nothing is runnable, so set this processor idle if it
- * was running. If it was in an assignment or shutdown,
- * leave it alone. Return its idle thread.
- */
- simple_lock(&pset->idle_lock);
- if (myprocessor->state == PROCESSOR_RUNNING) {
- myprocessor->state = PROCESSOR_IDLE;
- /*
- * XXX Until it goes away, put master on end of queue, others
- * XXX on front so master gets used last.
- */
- if (myprocessor == master_processor)
- queue_enter(&(pset->idle_queue), myprocessor,
- processor_t, processor_queue);
- else
- queue_enter_first(&(pset->idle_queue), myprocessor,
- processor_t, processor_queue);
+ s = splsched();
+ thread_lock(thread);
+ thread->bound_processor = processor;
+ processor->idle_thread = thread;
+ thread->sched_pri = thread->base_pri = IDLEPRI;
+ thread->state = (TH_RUN | TH_IDLE);
+ thread->options |= TH_OPT_IDLE_THREAD;
+ thread_unlock(thread);
+ splx(s);
- pset->idle_count++;
- }
- simple_unlock(&pset->idle_lock);
+ thread_deallocate(thread);
- return (myprocessor->idle_thread);
+ return (KERN_SUCCESS);
}
/*
- * no_dispatch_count counts number of times processors go non-idle
- * without being dispatched. This should be very rare.
- */
-int no_dispatch_count = 0;
-
-/*
- * This is the idle thread, which just looks for other threads
- * to execute.
+ * sched_startup:
+ *
+ * Kicks off scheduler services.
+ *
+ * Called at splsched.
*/
void
-idle_thread_continue(void)
+sched_startup(void)
{
- register processor_t myprocessor;
- register volatile thread_t *threadp;
- register volatile int *gcount;
- register volatile int *lcount;
- register thread_t new_thread;
- register int state;
- register processor_set_t pset;
- int mycpu;
+ kern_return_t result;
+ thread_t thread;
- mycpu = cpu_number();
- myprocessor = current_processor();
- threadp = (volatile thread_t *) &myprocessor->next_thread;
- lcount = (volatile int *) &myprocessor->runq.count;
+ simple_lock_init(&sched_vm_group_list_lock, 0);
- for (;;) {
-#ifdef MARK_CPU_IDLE
- MARK_CPU_IDLE(mycpu);
-#endif /* MARK_CPU_IDLE */
+ result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
+ (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
+ if (result != KERN_SUCCESS)
+ panic("sched_startup");
- gcount = (volatile int *)&myprocessor->processor_set->runq.count;
+ thread_deallocate(thread);
- (void)splsched();
- while ( (*threadp == (volatile thread_t)THREAD_NULL) &&
- (*gcount == 0) && (*lcount == 0) ) {
+ /*
+ * Yield to the sched_init_thread once, so that our
+ * own thread is fully initialized by the time we are
+ * switched back to.
+ *
+ * The current thread is the only other thread
+ * active at this point.
+ */
+ thread_block(THREAD_CONTINUE_NULL);
+}
- /* check for ASTs while we wait */
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
- if (need_ast[mycpu] &~ (AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT)) {
- /* don't allow scheduling ASTs */
- need_ast[mycpu] &= ~(AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT);
- ast_taken(FALSE, AST_ALL, TRUE); /* back at spllo */
- }
- else
-#ifdef __ppc__
- machine_idle();
-#else
- (void)spllo();
+static volatile uint64_t sched_maintenance_deadline;
+#if defined(CONFIG_TELEMETRY)
+static volatile uint64_t sched_telemetry_deadline = 0;
#endif
- machine_clock_assist();
+static uint64_t sched_tick_last_abstime;
+static uint64_t sched_tick_delta;
+uint64_t sched_tick_max_delta;
+/*
+ * sched_timeshare_maintenance_continue:
+ *
+ * Perform periodic bookkeeping functions about ten
+ * times per second.
+ */
+void
+sched_timeshare_maintenance_continue(void)
+{
+ uint64_t sched_tick_ctime, late_time;
- (void)splsched();
- }
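+ /* Track the earliest make-runnable timestamps observed while scanning the run queues */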
+ struct sched_update_scan_context scan_context = {
+ .earliest_bg_make_runnable_time = UINT64_MAX,
+ .earliest_normal_make_runnable_time = UINT64_MAX,
+ .earliest_rt_make_runnable_time = UINT64_MAX
+ };
-#ifdef MARK_CPU_ACTIVE
- (void)spllo();
- MARK_CPU_ACTIVE(mycpu);
- (void)splsched();
-#endif /* MARK_CPU_ACTIVE */
+ sched_tick_ctime = mach_absolute_time();
- /*
- * This is not a switch statement to avoid the
- * bounds checking code in the common case.
+ if (__improbable(sched_tick_last_abstime == 0)) {
+ sched_tick_last_abstime = sched_tick_ctime;
+ late_time = 0;
+ sched_tick_delta = 1;
+ } else {
+ late_time = sched_tick_ctime - sched_tick_last_abstime;
+ sched_tick_delta = late_time / sched_tick_interval;
+ /* Ensure a minimum delta of 1, since the elapsed interval could be
+ * slightly smaller than the sched_tick_interval due to dispatch
+ * latencies.
*/
- pset = myprocessor->processor_set;
- simple_lock(&pset->idle_lock);
-retry:
- state = myprocessor->state;
- if (state == PROCESSOR_DISPATCHING) {
- /*
- * Commmon case -- cpu dispatched.
- */
- new_thread = *threadp;
- *threadp = (volatile thread_t) THREAD_NULL;
- myprocessor->state = PROCESSOR_RUNNING;
- simple_unlock(&pset->idle_lock);
-
- thread_lock(new_thread);
- simple_lock(&myprocessor->runq.lock);
- simple_lock(&pset->runq.lock);
- if ( myprocessor->runq.highq > new_thread->sched_pri ||
- pset->runq.highq > new_thread->sched_pri ) {
- simple_unlock(&pset->runq.lock);
- simple_unlock(&myprocessor->runq.lock);
-
- if (new_thread->bound_processor != PROCESSOR_NULL)
- run_queue_enqueue(&myprocessor->runq, new_thread, HEAD_Q);
- else
- run_queue_enqueue(&pset->runq, new_thread, HEAD_Q);
- thread_unlock(new_thread);
+ sched_tick_delta = MAX(sched_tick_delta, 1);
- counter(c_idle_thread_block++);
- thread_block(idle_thread_continue);
- }
- else {
- simple_unlock(&pset->runq.lock);
- simple_unlock(&myprocessor->runq.lock);
+ /* If interrupt latencies or platform idle events advanced
+ * the timebase through periods where no threads were
+ * dispatched,
+ * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
+ * iterations.
+ */
+ sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
- /*
- * set up quantum for new thread.
- */
- if (new_thread->policy & (POLICY_RR|POLICY_FIFO))
- myprocessor->quantum = new_thread->unconsumed_quantum;
- else
- myprocessor->quantum = pset->set_quantum;
- thread_unlock(new_thread);
+ sched_tick_last_abstime = sched_tick_ctime;
+ sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
+ }
- myprocessor->first_quantum = TRUE;
- counter(c_idle_thread_handoff++);
- thread_run(myprocessor->idle_thread,
- idle_thread_continue, new_thread);
- }
- }
- else
- if (state == PROCESSOR_IDLE) {
- if (myprocessor->state != PROCESSOR_IDLE) {
- /*
- * Something happened, try again.
- */
- goto retry;
- }
- /*
- * Processor was not dispatched (Rare).
- * Set it running again.
- */
- no_dispatch_count++;
- pset->idle_count--;
- queue_remove(&pset->idle_queue, myprocessor,
- processor_t, processor_queue);
- myprocessor->state = PROCESSOR_RUNNING;
- simple_unlock(&pset->idle_lock);
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE)|DBG_FUNC_START,
+ sched_tick_delta,
+ late_time,
+ 0,
+ 0,
+ 0);
- counter(c_idle_thread_block++);
- thread_block(idle_thread_continue);
- }
- else
- if ( state == PROCESSOR_ASSIGN ||
- state == PROCESSOR_SHUTDOWN ) {
- /*
- * Changing processor sets, or going off-line.
- * Release next_thread if there is one. Actual
- * thread to run is on a runq.
- */
- if ((new_thread = (thread_t)*threadp) != THREAD_NULL) {
- *threadp = (volatile thread_t) THREAD_NULL;
- simple_unlock(&pset->idle_lock);
- thread_lock(new_thread);
- thread_setrun(new_thread, FALSE, TAIL_Q);
- thread_unlock(new_thread);
- } else
- simple_unlock(&pset->idle_lock);
-
- counter(c_idle_thread_block++);
- thread_block(idle_thread_continue);
- }
- else {
- simple_unlock(&pset->idle_lock);
- printf("Bad processor state %d (Cpu %d)\n",
- cpu_state(mycpu), mycpu);
- panic("idle_thread");
+	/* Add a number of pseudo-ticks corresponding to the elapsed interval.
+	 * This could be greater than 1 if substantial intervals pass in which
+	 * all processors are idle, although that rarely occurs in practice.
+	 */
+
+ sched_tick += sched_tick_delta;
- }
+ /*
+ * Compute various averages.
+ */
+ compute_averages(sched_tick_delta);
- (void)spllo();
- }
-}
+ /*
+ * Scan the run queues for threads which
+ * may need to be updated.
+ */
+ SCHED(thread_update_scan)(&scan_context);
-void
-idle_thread(void)
-{
- thread_t self = current_thread();
- spl_t s;
+ rt_runq_scan(&scan_context);
- stack_privilege(self);
- thread_swappable(current_act(), FALSE);
+ uint64_t ctime = mach_absolute_time();
- s = splsched();
- thread_lock(self);
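+	/* Report how long the longest-waiting background, timeshare, and
+	 * realtime threads have been runnable; the ternary comparisons guard
+	 * against unsigned underflow should a make-runnable timestamp
+	 * postdate ctime.
+	 */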
+ machine_max_runnable_latency(ctime > scan_context.earliest_bg_make_runnable_time ? ctime - scan_context.earliest_bg_make_runnable_time : 0,
+ ctime > scan_context.earliest_normal_make_runnable_time ? ctime - scan_context.earliest_normal_make_runnable_time : 0,
+ ctime > scan_context.earliest_rt_make_runnable_time ? ctime - scan_context.earliest_rt_make_runnable_time : 0);
- self->priority = IDLEPRI;
- self->sched_pri = self->priority;
+ /*
+ * Check to see if the special sched VM group needs attention.
+ */
+ sched_vm_group_maintenance();
- thread_unlock(self);
- splx(s);
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE)|DBG_FUNC_END,
+ sched_pri_shift,
+ sched_background_pri_shift,
+ 0,
+ 0,
+ 0);
- counter(c_idle_thread_block++);
- thread_block((void(*)(void))0);
- idle_thread_continue();
+ assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
+ thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
/*NOTREACHED*/
}
-static AbsoluteTime sched_tick_interval, sched_tick_deadline;
+static uint64_t sched_maintenance_wakeups;
/*
- * sched_tick_thread
- *
- * Update the priorities of all threads periodically.
+ * Determine if the set of routines formerly driven by a maintenance timer
+ * must be invoked, based on a deadline comparison. Signals the scheduler
+ * maintenance thread on deadline expiration. Must be invoked at an interval
+ * lower than the "sched_tick_interval", currently accomplished by
+ * invocation via the quantum expiration timer and at context switch time.
+ * Performance matters: this routine reuses a timestamp approximating the
+ * current absolute time received from the caller, and should perform
+ * no more than a comparison against the deadline in the common case.
*/
void
-sched_tick_thread_continue(void)
-{
- AbsoluteTime abstime;
-#if SIMPLE_CLOCK
- int new_usec;
-#endif /* SIMPLE_CLOCK */
+sched_timeshare_consider_maintenance(uint64_t ctime)
+{
+ uint64_t ndeadline, deadline = sched_maintenance_deadline;
- clock_get_uptime(&abstime);
+ if (__improbable(ctime >= deadline)) {
+ if (__improbable(current_thread() == sched_maintenance_thread))
+ return;
+ OSMemoryBarrier();
- sched_tick++; /* age usage one more time */
-#if SIMPLE_CLOCK
- /*
- * Compensate for clock drift. sched_usec is an
- * exponential average of the number of microseconds in
- * a second. It decays in the same fashion as cpu_usage.
- */
- new_usec = sched_usec_elapsed();
- sched_usec = (5*sched_usec + 3*new_usec)/8;
-#endif /* SIMPLE_CLOCK */
+ ndeadline = ctime + sched_tick_interval;
- /*
- * Compute the scheduler load factors.
- */
- compute_mach_factor();
+ if (__probable(__sync_bool_compare_and_swap(&sched_maintenance_deadline, deadline, ndeadline))) {
+ thread_wakeup((event_t)sched_timeshare_maintenance_continue);
+ sched_maintenance_wakeups++;
+ }
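+		/* The compare-and-swap guarantees that when several CPUs
+		 * observe an expired deadline concurrently, exactly one
+		 * advances it and issues the wakeup; the others lose the
+		 * race and fall through.
+		 */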
+ }
+#if defined(CONFIG_TELEMETRY)
/*
- * Scan the run queues for runnable threads that need to
- * have their priorities recalculated.
+ * Windowed telemetry is driven by the scheduler. It should be safe
+ * to call compute_telemetry_windowed() even when windowed telemetry
+ * is disabled, but we should try to avoid doing extra work for no
+ * reason.
*/
- do_thread_scan();
+ if (telemetry_window_enabled) {
+ deadline = sched_telemetry_deadline;
- clock_deadline_for_periodic_event(sched_tick_interval, abstime,
- &sched_tick_deadline);
+ if (__improbable(ctime >= deadline)) {
+ ndeadline = ctime + sched_telemetry_interval;
- assert_wait((event_t)sched_tick_thread_continue, THREAD_INTERRUPTIBLE);
- thread_set_timer_deadline(sched_tick_deadline);
- thread_block(sched_tick_thread_continue);
- /*NOTREACHED*/
+ if (__probable(__sync_bool_compare_and_swap(&sched_telemetry_deadline, deadline, ndeadline))) {
+ compute_telemetry_windowed();
+ }
+ }
+ }
+#endif /* CONFIG_TELEMETRY */
}
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
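+/* Bootstrap for the scheduler maintenance thread: block once so creation
+ * can complete, record the current thread, then enter the supplied
+ * continuation, which never returns.
+ */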
void
-sched_tick_thread(void)
+sched_init_thread(void (*continuation)(void))
{
- thread_t self = current_thread();
- natural_t rate;
- spl_t s;
-
- stack_privilege(self);
- thread_swappable(self->top_act, FALSE);
-
- s = splsched();
- thread_lock(self);
-
- self->priority = MAXPRI_STANDARD;
- self->sched_pri = self->priority;
+ thread_block(THREAD_CONTINUE_NULL);
- thread_unlock(self);
- splx(s);
+ sched_maintenance_thread = current_thread();
+ continuation();
- rate = (1000 >> SCHED_TICK_SHIFT);
- clock_interval_to_absolutetime_interval(rate, USEC_PER_SEC,
- &sched_tick_interval);
- clock_get_uptime(&sched_tick_deadline);
-
- thread_block(sched_tick_thread_continue);
/*NOTREACHED*/
}
-#define MAX_STUCK_THREADS 128
+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
/*
- * do_thread_scan: scan for stuck threads. A thread is stuck if
- * it is runnable but its priority is so low that it has not
- * run for several seconds. Its priority should be higher, but
- * won't be until it runs and calls update_priority. The scanner
- * finds these threads and does the updates.
+ * thread_update_scan / runq_scan:
+ *
+ * Scan the run queues to account for timesharing threads
+ * which need to be updated.
*
* Scanner runs in two passes. Pass one squirrels likely
- * thread ids away in an array (takes out references for them).
- * Pass two does the priority updates. This is necessary because
- * the run queue lock is required for the candidate scan, but
- * cannot be held during updates [set_pri will deadlock].
+ * threads away in an array, pass two does the update.
*
- * Array length should be enough so that restart isn't necessary,
- * but restart logic is included. Does not scan processor runqs.
+ * This is necessary because the run queue is locked for
+ * the candidate scan, but the thread is locked for the update.
*
+ * Array should be sized to make forward progress, without
+ * disabling preemption for long periods.
*/
-thread_t stuck_threads[MAX_STUCK_THREADS];
-int stuck_count = 0;
+
+#define THREAD_UPDATE_SIZE 128
+
+static thread_t thread_update_array[THREAD_UPDATE_SIZE];
+static int thread_update_count = 0;
+
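+/* Pass one: stash a candidate and take a reference so the thread cannot
+ * be destroyed between the locked runq scan and the update in pass two.
+ */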
+/* Returns TRUE if thread was added, FALSE if thread_update_array is full */
+boolean_t
+thread_update_add_thread(thread_t thread)
+{
+ if (thread_update_count == THREAD_UPDATE_SIZE)
+ return (FALSE);
+
+ thread_update_array[thread_update_count++] = thread;
+ thread_reference_internal(thread);
+ return (TRUE);
+}
+
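+/* Pass two: drain the candidate array, recomputing the priority of each
+ * thread that is not waiting, then drop the reference taken in pass one.
+ */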
+void
+thread_update_process_threads(void)
+{
+ while (thread_update_count > 0) {
+ spl_t s;
+ thread_t thread = thread_update_array[--thread_update_count];
+ thread_update_array[thread_update_count] = THREAD_NULL;
+
+ s = splsched();
+ thread_lock(thread);
+ if (!(thread->state & (TH_WAIT)) && (SCHED(can_update_priority)(thread))) {
+ SCHED(update_priority)(thread);
+ }
+ thread_unlock(thread);
+ splx(s);
+
+ thread_deallocate(thread);
+ }
+}
/*
- * do_runq_scan is the guts of pass 1. It scans a runq for
- * stuck threads. A boolean is returned indicating whether
- * a retry is needed.
+ * Scan a runq for candidate threads.
+ *
+ * Returns TRUE if retry is needed.
*/
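+/* Locking note: unlike the old do_runq_scan (whose splsched()/lock
+ * acquisition is deleted below), this routine runs with interrupts
+ * disabled and the runq locked by the caller.
+ */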
boolean_t
-do_runq_scan(
- run_queue_t runq)
+runq_scan(
+ run_queue_t runq,
+ sched_update_scan_context_t scan_context)
{
+ register int count;
register queue_t q;
register thread_t thread;
- register int count;
- spl_t s;
- boolean_t result = FALSE;
- s = splsched();
- simple_lock(&runq->lock);
if ((count = runq->count) > 0) {
q = runq->queues + runq->highq;
while (count > 0) {
queue_iterate(q, thread, thread_t, links) {
- if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
- thread->policy == POLICY_TIMESHARE ) {
- if (thread->sched_stamp != sched_tick) {
- /*
- * Stuck, save its id for later.
- */
- if (stuck_count == MAX_STUCK_THREADS) {
- /*
- * !@#$% No more room.
- */
- simple_unlock(&runq->lock);
- splx(s);
-
- return (TRUE);
- }
-
- /*
- * Inline version of thread_reference
- * XXX - lock ordering problem here:
- * thread locks should be taken before runq
- * locks: just try and get the thread's locks
- * and ignore this thread if we fail, we might
- * have better luck next time.
- */
- if (simple_lock_try(&thread->lock)) {
- thread->ref_count++;
- thread_unlock(thread);
- stuck_threads[stuck_count++] = thread;
- }
- else
- result = TRUE;
+ if ( thread->sched_stamp != sched_tick &&
+ (thread->sched_mode == TH_MODE_TIMESHARE) ) {
+ if (thread_update_add_thread(thread) == FALSE)
+ return (TRUE);
+ }
+
+ if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
+ if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
+ scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
+ }
+ } else {
+ if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
+ scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
}
}
				count--;
			}

			q--;
}
}
- simple_unlock(&runq->lock);
- splx(s);
- return (result);
+ return (FALSE);
}
-boolean_t thread_scan_enabled = TRUE;
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+boolean_t
+thread_eager_preemption(thread_t thread)
+{
+ return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
+}
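+
+/* Mark a thread for eager preemption. If the target is the caller, take
+ * the resulting context-switch AST immediately; if it is running on
+ * another processor, poke that processor with an AST check so the flag
+ * is noticed promptly.
+ */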
void
-do_thread_scan(void)
+thread_set_eager_preempt(thread_t thread)
{
- register boolean_t restart_needed = FALSE;
- register thread_t thread;
- register processor_set_t pset = &default_pset;
- register processor_t processor;
- spl_t s;
+ spl_t x;
+ processor_t p;
+ ast_t ast = AST_NONE;
- if (!thread_scan_enabled)
- return;
+ x = splsched();
+ p = current_processor();
- do {
- restart_needed = do_runq_scan(&pset->runq);
- if (!restart_needed) {
- simple_lock(&pset->processors_lock);
- processor = (processor_t)queue_first(&pset->processors);
- while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
- if (restart_needed = do_runq_scan(&processor->runq))
- break;
+ thread_lock(thread);
+ thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
- processor = (processor_t)queue_next(&processor->processors);
- }
- simple_unlock(&pset->processors_lock);
+ if (thread == current_thread()) {
+
+ ast = csw_check(p, AST_NONE);
+ thread_unlock(thread);
+ if (ast != AST_NONE) {
+ (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
}
+ } else {
+ p = thread->last_processor;
- /*
- * Ok, we now have a collection of candidates -- fix them.
- */
- while (stuck_count > 0) {
- thread = stuck_threads[--stuck_count];
- stuck_threads[stuck_count] = THREAD_NULL;
- s = splsched();
- thread_lock(thread);
- if (thread->policy == POLICY_TIMESHARE) {
- if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
- thread->sched_stamp != sched_tick )
- update_priority(thread);
- }
- thread_unlock(thread);
- splx(s);
- thread_deallocate(thread);
- }
-
- } while (restart_needed);
-}
+ if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
+ p->active_thread == thread) {
+ cause_ast_check(p);
+ }
-/*
- * Just in case someone doesn't use the macro
- */
-#undef thread_wakeup
-void
-thread_wakeup(
- event_t x);
+ thread_unlock(thread);
+ }
-void
-thread_wakeup(
- event_t x)
-{
- thread_wakeup_with_result(x, THREAD_AWAKENED);
+ splx(x);
}
-boolean_t
-thread_runnable(
- thread_t thread)
+void
+thread_clear_eager_preempt(thread_t thread)
{
- sched_policy_t *policy;
+ spl_t x;
- /* Ask sched policy if thread is runnable */
- policy = policy_id_to_sched_policy(thread->policy);
+ x = splsched();
+ thread_lock(thread);
- return ((policy != SCHED_POLICY_NULL)?
- policy->sp_ops.sp_thread_runnable(policy, thread) : FALSE);
+ thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
+
+ thread_unlock(thread);
+ splx(x);
}
-#if DEBUG
-
-void
-dump_processor_set(
- processor_set_t ps)
-{
- printf("processor_set: %08x\n",ps);
- printf("idle_queue: %08x %08x, idle_count: 0x%x\n",
- ps->idle_queue.next,ps->idle_queue.prev,ps->idle_count);
- printf("processors: %08x %08x, processor_count: 0x%x\n",
- ps->processors.next,ps->processors.prev,ps->processor_count);
- printf("tasks: %08x %08x, task_count: 0x%x\n",
- ps->tasks.next,ps->tasks.prev,ps->task_count);
- printf("threads: %08x %08x, thread_count: 0x%x\n",
- ps->threads.next,ps->threads.prev,ps->thread_count);
- printf("ref_count: 0x%x, active: %x\n",
- ps->ref_count,ps->active);
- printf("pset_self: %08x, pset_name_self: %08x\n",ps->pset_self, ps->pset_name_self);
- printf("max_priority: 0x%x, policies: 0x%x, set_quantum: 0x%x\n",
- ps->max_priority, ps->policies, ps->set_quantum);
-}
-
-#define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s])
-
+/*
+ * Scheduling statistics: per-processor counters for context switches,
+ * preemptions, and involvement of realtime threads.
+ */
void
-dump_processor(
- processor_t p)
+sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
{
- char *states[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING",
- "ASSIGN","SHUTDOWN"};
-
- printf("processor: %08x\n",p);
- printf("processor_queue: %08x %08x\n",
- p->processor_queue.next,p->processor_queue.prev);
- printf("state: %8s, next_thread: %08x, idle_thread: %08x\n",
- processor_state(p->state), p->next_thread, p->idle_thread);
- printf("quantum: %u, first_quantum: %x, last_quantum: %u\n",
- p->quantum, p->first_quantum, p->last_quantum);
- printf("processor_set: %08x, processor_set_next: %08x\n",
- p->processor_set, p->processor_set_next);
- printf("processors: %08x %08x\n", p->processors.next,p->processors.prev);
- printf("processor_self: %08x, slot_num: 0x%x\n", p->processor_self, p->slot_num);
-}
+ struct processor_sched_statistics *stats;
+ boolean_t to_realtime = FALSE;
+
+ stats = &processor->processor_data.sched_stats;
+ stats->csw_count++;
-void
-dump_run_queue_struct(
- run_queue_t rq)
-{
- char dump_buf[80];
- int i;
+ if (otherpri >= BASEPRI_REALTIME) {
+ stats->rt_sched_count++;
+ to_realtime = TRUE;
+ }
- for( i=0; i < NRQS; ) {
- int j;
+ if ((reasons & AST_PREEMPT) != 0) {
+ stats->preempt_count++;
- printf("%6s",(i==0)?"runq:":"");
- for( j=0; (j<8) && (i < NRQS); j++,i++ ) {
- if( rq->queues[i].next == &rq->queues[i] )
- printf( " --------");
- else
- printf(" %08x",rq->queues[i].next);
- }
- printf("\n");
- }
- for( i=0; i < NRQBM; ) {
- register unsigned int mask;
- char *d=dump_buf;
+ if (selfpri >= BASEPRI_REALTIME) {
+ stats->preempted_rt_count++;
+ }
- mask = ~0;
- mask ^= (mask>>1);
+ if (to_realtime) {
+ stats->preempted_by_rt_count++;
+ }
- do {
- *d++ = ((rq->bitmap[i]&mask)?'r':'e');
- mask >>=1;
- } while( mask );
- *d = '\0';
- printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf);
- i++;
- }
- printf("highq: 0x%x, count: %u\n", rq->highq, rq->count);
+ }
}
-
+
void
-dump_run_queues(
- run_queue_t runq)
+sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
{
- register queue_t q1;
- register int i;
- register queue_entry_t e;
-
- q1 = runq->queues;
- for (i = 0; i < NRQS; i++) {
- if (q1->next != q1) {
- int t_cnt;
+ uint64_t timestamp = mach_absolute_time();
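+
+	/* count_sum accumulates a time-weighted integral of queue depth
+	 * (depth x dwell time); dividing a count_sum delta by the length of
+	 * the sampling window yields the average run-queue length over that
+	 * window.
+	 */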
- printf("[%u]",i);
- for (t_cnt=0, e = q1->next; e != q1; e = e->next) {
- printf("\t0x%08x",e);
- if( (t_cnt = ++t_cnt%4) == 0 )
- printf("\n");
- }
- if( t_cnt )
- printf("\n");
- }
- /* else
- printf("[%u]\t<empty>\n",i);
- */
- q1++;
- }
+ stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
+ stats->last_change_timestamp = timestamp;
}
+/*
+ * For calls from assembly code
+ */
+#undef thread_wakeup
void
-checkrq(
- run_queue_t rq,
- char *msg)
-{
- register queue_t q1;
- register int i, j;
- register queue_entry_t e;
- register int highq;
-
- highq = NRQS;
- j = 0;
- q1 = rq->queues;
- for (i = MAXPRI; i >= 0; i--) {
- if (q1->next == q1) {
- if (q1->prev != q1) {
- panic("checkrq: empty at %s", msg);
- }
- }
- else {
- if (highq == -1)
- highq = i;
-
- for (e = q1->next; e != q1; e = e->next) {
- j++;
- if (e->next->prev != e)
- panic("checkrq-2 at %s", msg);
- if (e->prev->next != e)
- panic("checkrq-3 at %s", msg);
- }
- }
- q1++;
- }
- if (j != rq->count)
- panic("checkrq: count wrong at %s", msg);
- if (rq->count != 0 && highq > rq->highq)
- panic("checkrq: highq wrong at %s", msg);
-}
+thread_wakeup(
+ event_t x);
void
-thread_check(
- register thread_t thread,
- register run_queue_t rq)
+thread_wakeup(
+ event_t x)
{
- register int whichq = thread->sched_pri;
- register queue_entry_t queue, entry;
+ thread_wakeup_with_result(x, THREAD_AWAKENED);
+}
- if (whichq < MINPRI || whichq > MAXPRI)
- panic("thread_check: bad pri");
+boolean_t
+preemption_enabled(void)
+{
+ return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
+}
- if (whichq != thread->whichq)
- panic("thread_check: whichq");
+static void
+sched_timer_deadline_tracking_init(void)
+{
+ nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
+ nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
+}
- queue = &rq->queues[whichq];
- entry = queue_first(queue);
- while (!queue_end(queue, entry)) {
- if (entry == (queue_entry_t)thread)
- return;
- entry = queue_next(entry);
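+/* Validate that the calling thread owns the specified work interval,
+ * sample its scheduling urgency, and forward the timing parameters to
+ * the machine layer.
+ */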
+kern_return_t
+sched_work_interval_notify(thread_t thread, uint64_t work_interval_id, uint64_t start, uint64_t finish, uint64_t deadline, uint64_t next_start, uint32_t flags)
+{
+ int urgency;
+ uint64_t urgency_param1, urgency_param2;
+ spl_t s;
+
+ if (work_interval_id == 0) {
+ return (KERN_INVALID_ARGUMENT);
}
- panic("thread_check: not found");
-}
+ assert(thread == current_thread());
-#endif /* DEBUG */
+ thread_mtx_lock(thread);
+ if (thread->work_interval_id != work_interval_id) {
+ thread_mtx_unlock(thread);
+ return (KERN_INVALID_ARGUMENT);
+ }
+ thread_mtx_unlock(thread);
+
+ s = splsched();
+ thread_lock(thread);
+ urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
+ thread_unlock(thread);
+ splx(s);
-#if MACH_KDB
-#include <ddb/db_output.h>
-#define printf kdbprintf
-extern int db_indent;
-void db_sched(void);
+ machine_work_interval_notify(thread, work_interval_id, start, finish, deadline, next_start, urgency, flags);
+ return (KERN_SUCCESS);
+}
-void
-db_sched(void)
-{
- iprintf("Scheduling Statistics:\n");
- db_indent += 2;
- iprintf("Thread invocations: csw %d same %d\n",
- c_thread_invoke_csw, c_thread_invoke_same);
-#if MACH_COUNTERS
- iprintf("Thread block: calls %d\n",
- c_thread_block_calls);
- iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
- c_idle_thread_handoff,
- c_idle_thread_block, no_dispatch_count);
- iprintf("Sched thread blocks: %d\n", c_sched_thread_block);
-#endif /* MACH_COUNTERS */
- db_indent -= 2;
-}
-#endif /* MACH_KDB */
+void
+thread_set_options(uint32_t thopt)
+{
+ spl_t x;
+ thread_t t = current_thread();
+
+ x = splsched();
+ thread_lock(t);
+
+ t->options |= thopt;
+
+ thread_unlock(t);
+ splx(x);
+}