X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..527f99514973766e9c0382a4d8550dfb00f54939:/osfmk/kern/processor.h

diff --git a/osfmk/kern/processor.h b/osfmk/kern/processor.h
index 0c0a7b90b..09caf6a7f 100644
--- a/osfmk/kern/processor.h
+++ b/osfmk/kern/processor.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -74,190 +74,302 @@
 #include
 #include
 #include
-#include
+#include
+#include
+#include
 #include
 #include
+#include
 #include
-#include
+typedef enum {
+	PSET_SMP,
+} pset_cluster_type_t;
 
 struct processor_set {
-	queue_head_t		idle_queue;		/* idle processors */
-	int			idle_count;		/* how many ? */
 	queue_head_t		active_queue;		/* active processors */
-
-	queue_head_t		processors;		/* all processors here */
-	int			processor_count;	/* how many ? */
-	decl_simple_lock_data(,sched_lock)	/* lock for runq and above */
-
-	struct run_queue	runq;			/* runq for this set */
-
-	queue_head_t		tasks;			/* tasks assigned */
-	int			task_count;		/* how many */
-	queue_head_t		threads;		/* threads in this set */
-	int			thread_count;		/* how many */
-	int			ref_count;		/* structure ref count */
-	boolean_t		active;			/* is pset in use */
-	decl_mutex_data(,	lock)			/* lock for above */
-
-	int			timeshare_quanta;	/* timeshare quantum factor */
+	queue_head_t		idle_queue;		/* idle processors */
+	queue_head_t		idle_secondary_queue;	/* idle secondary processors */
+	queue_head_t		unused_queue;		/* processors not recommended by CLPC */
+
+	int			online_processor_count;
+	int			active_processor_count;
+	int			load_average;
+
+	int			cpu_set_low, cpu_set_hi;
+	int			cpu_set_count;
+	uint64_t		cpu_bitmask;
+	uint64_t		recommended_bitmask;
+
+#if __SMP__
+	decl_simple_lock_data(,sched_lock)	/* lock for above */
+#endif
+
+#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ)
+	struct run_queue	pset_runq;		/* runq for this processor set */
+#endif
+	struct rt_queue		rt_runq;		/* realtime runq for this processor set */
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+	int			pset_runq_bound_count;
+		/* # of threads in runq bound to any processor in pset */
+#endif
+
+	/* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */
+	uint64_t		pending_AST_cpu_mask;
+#if defined(CONFIG_SCHED_DEFERRED_AST)
+	/*
+	 * A separate mask, for ASTs that we may be able to cancel. This is dependent on
+	 * some level of support for requesting an AST on a processor, and then quashing
+	 * that request later.
+	 *
+	 * The purpose of this field (and the associated codepaths) is to infer when we
+	 * no longer need a processor that is DISPATCHING to come up, and to prevent it
+	 * from coming out of IDLE if possible. This should serve to decrease the number
+	 * of spurious ASTs in the system, and let processors spend longer periods in
+	 * IDLE.
+	 */
+	uint64_t		pending_deferred_AST_cpu_mask;
+#endif
+	uint64_t		pending_spill_cpu_mask;
 
 	struct ipc_port *	pset_self;		/* port for operations */
 	struct ipc_port *	pset_name_self;		/* port for information */
 
-	uint32_t		run_count;		/* threads running in set */
-	uint32_t		share_count;		/* timeshare threads running in set */
+	processor_set_t		pset_list;		/* chain of associated psets */
+	pset_node_t		node;
+	uint32_t		pset_cluster_id;
+	pset_cluster_type_t	pset_cluster_type;
+};
 
-	integer_t		mach_factor;		/* mach_factor */
-	integer_t		load_average;		/* load_average */
+extern struct processor_set	pset0;
 
-	uint32_t		pri_shift;		/* timeshare usage -> priority */
+struct pset_node {
+	processor_set_t		psets;			/* list of associated psets */
+
+	pset_node_t		nodes;			/* list of associated subnodes */
+	pset_node_t		node_list;		/* chain of associated nodes */
+
+	pset_node_t		parent;
 };
 
-extern struct processor_set	default_pset;
+extern struct pset_node	pset_node0;
+
+extern queue_head_t		tasks, terminated_tasks, threads, corpse_tasks; /* Terminated tasks are ONLY for stackshot */
+extern int			tasks_count, terminated_tasks_count, threads_count;
+decl_lck_mtx_data(extern,tasks_threads_lock)
+decl_lck_mtx_data(extern,tasks_corpse_lock)
 
 struct processor {
 	queue_chain_t		processor_queue;/* idle/active queue link,
 						 * MUST remain the first element */
 	int			state;		/* See below */
+	boolean_t		is_SMT;
+	boolean_t		is_recommended;
 	struct thread
 				*active_thread,	/* thread running on processor */
 				*next_thread,	/* next thread when dispatched */
 				*idle_thread;	/* this processor's idle thread. */
 
-	processor_set_t		processor_set;	/* current membership */
+	processor_set_t		processor_set;	/* assigned set */
 
-	int			current_pri;	/* priority of current thread */
+	int			current_pri;	/* priority of current thread */
+	sfi_class_id_t		current_sfi_class;	/* SFI class of current thread */
+	perfcontrol_class_t	current_perfctl_class;	/* Perfcontrol class for current thread */
+	int			starting_pri;	/* priority of current thread as it was when scheduled */
+	pset_cluster_type_t	current_recommended_pset_type;	/* Cluster type recommended for current thread */
+	int			cpu_id;		/* platform numeric id */
 
 	timer_call_data_t	quantum_timer;	/* timer for quantum expiration */
 	uint64_t		quantum_end;	/* time when current quantum ends */
 	uint64_t		last_dispatch;	/* time of last dispatch */
-	int			timeslice;	/* quanta before timeslice ends */
 	uint64_t		deadline;	/* current deadline */
-
-	struct run_queue	runq;		/* local runq for this processor */
-
-	queue_chain_t		processors;	/* processors in set */
-	decl_simple_lock_data(,lock)
+	boolean_t		first_timeslice;	/* has the quantum expired since context switch */
+
+#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ)
+	struct run_queue	runq;		/* runq for this processor */
+#endif
+
+#if defined(CONFIG_SCHED_TRADITIONAL)
+	int			runq_bound_count;	/* # of threads bound to this processor */
+#endif
+#if defined(CONFIG_SCHED_GRRR)
+	struct grrr_run_queue	grrr_runq;	/* Group Ratio Round-Robin runq */
+#endif
+
+	processor_t		processor_primary;	/* pointer to primary processor for
+							 * secondary SMT processors, or a pointer
+							 * to ourselves for primaries or non-SMT */
+	processor_t		processor_secondary;
 	struct ipc_port *	processor_self;	/* port for operations */
 
+	processor_t		processor_list;	/* all existing processors */
 	processor_data_t	processor_data;	/* per-processor data */
 };
 
 extern processor_t		processor_list;
-extern unsigned int		processor_count;
 decl_simple_lock_data(extern,processor_list_lock)
 
-extern processor_t	master_processor;
+#define MAX_SCHED_CPUS		64 /* Maximum number of CPUs supported by the scheduler. bits.h:bitmap_*() macros need to be used to support greater than 64 */
+extern processor_t		processor_array[MAX_SCHED_CPUS]; /* array indexed by cpuid */
+
+extern uint32_t			processor_avail_count;
+
+extern processor_t		master_processor;
+
+extern boolean_t		sched_stats_active;
 
 /*
- * NOTE: The processor->processor_set link is needed in one of the
- * scheduler's critical paths. [Figure out where to look for another
- * thread to run on this processor.] It is accessed without locking.
- * The following access protocol controls this field.
+ *	Processor state is accessed by locking the scheduling lock
+ *	for the assigned processor set.
  *
- * Read from own processor - just read.
- * Read from another processor - lock processor structure during read.
- * Write from own processor - lock processor structure during write.
- * Write from another processor - NOT PERMITTED.
+ *	          -------------------- SHUTDOWN
+ *	         /                     ^     ^
+ *	       _/                      |      \
+ *	 OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
+ *	          \_________________^   ^ ^______/           /
+ *	                                 \__________________/
  *
+ *	Most of these state transitions are externally driven as a
+ *	a directive (for instance telling an IDLE processor to start
+ *	coming out of the idle state to run a thread). However these
+ *	are typically paired with a handshake by the processor itself
+ *	to indicate that it has completed a transition of indeterminate
+ *	length (for example, the DISPATCHING->RUNNING or START->RUNNING
+ *	transitions must occur on the processor itself).
+ *
+ *	The boot processor has some special cases, and skips the START state,
+ *	since it has already bootstrapped and is ready to context switch threads.
+ *
+ *	When a processor is in DISPATCHING or RUNNING state, the current_pri,
+ *	current_thmode, and deadline fields should be set, so that other
+ *	processors can evaluate if it is an appropriate candidate for preemption.
  */
-
+#if defined(CONFIG_SCHED_DEFERRED_AST)
 /*
- * Processor state locking:
+ *	          -------------------- SHUTDOWN
+ *	         /                     ^     ^
+ *	       _/                      |      \
+ *	 OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
+ *	          \_________________^   ^ ^______/   ^_____ /  /
+ *	                                 \__________________/
  *
- * Values for the processor state are defined below. If the processor
- * is off-line or being shutdown, then it is only necessary to lock
- * the processor to change its state. Otherwise it is only necessary
- * to lock its processor set's sched_lock. Scheduler code will
- * typically lock only the sched_lock, but processor manipulation code
- * will often lock both.
+ *	A DISPATCHING processor may be put back into IDLE, if another
+ *	processor determines that the target processor will have nothing to do
+ *	upon reaching the RUNNING state. This is racy, but if the target
+ *	responds and becomes RUNNING, it will not break the processor state
+ *	machine.
+ *
+ *	This change allows us to cancel an outstanding signal/AST on a processor
+ *	(if such an operation is supported through hardware or software), and
+ *	push the processor back into the IDLE state as a power optimization.
  */
+#endif
 
 #define PROCESSOR_OFF_LINE	0	/* Not available */
-#define PROCESSOR_RUNNING	1	/* Normal execution */
-#define PROCESSOR_IDLE		2	/* Idle */
-#define PROCESSOR_DISPATCHING	3	/* Dispatching (idle -> running) */
-#define PROCESSOR_SHUTDOWN	4	/* Going off-line */
-#define PROCESSOR_START		5	/* Being started */
+#define PROCESSOR_SHUTDOWN	1	/* Going off-line */
+#define PROCESSOR_START		2	/* Being started */
+/* 3 Formerly Inactive (unavailable) */
+#define PROCESSOR_IDLE		4	/* Idle (available) */
+#define PROCESSOR_DISPATCHING	5	/* Dispatching (idle -> active) */
+#define PROCESSOR_RUNNING	6	/* Normal execution */
 
 extern processor_t	current_processor(void);
 
-extern processor_t	cpu_to_processor(
-				int		cpu);
+/* Lock macros, always acquired and released with interrupts disabled (splsched()) */
 
-/* Useful lock macros */
+#if __SMP__
+#define pset_lock(p)		simple_lock(&(p)->sched_lock)
+#define pset_unlock(p)		simple_unlock(&(p)->sched_lock)
+#define pset_lock_init(p)	simple_lock_init(&(p)->sched_lock, 0)
 
-#define pset_lock(pset)		mutex_lock(&(pset)->lock)
-#define pset_lock_try(pset)	mutex_try(&(pset)->lock)
-#define pset_unlock(pset)	mutex_unlock(&(pset)->lock)
+#define rt_lock_lock(p)		simple_lock(&SCHED(rt_runq)(p)->rt_lock)
+#define rt_lock_unlock(p)	simple_unlock(&SCHED(rt_runq)(p)->rt_lock)
+#define rt_lock_init(p)		simple_lock_init(&SCHED(rt_runq)(p)->rt_lock, 0)
+#else
+#define pset_lock(p)		do { (void)p; } while(0)
+#define pset_unlock(p)		do { (void)p; } while(0)
+#define pset_lock_init(p)	do { (void)p; } while(0)
 
-#define processor_lock(pr)	simple_lock(&(pr)->lock)
-#define processor_unlock(pr)	simple_unlock(&(pr)->lock)
+#define rt_lock_lock(p)		do { (void)p; } while(0)
+#define rt_lock_unlock(p)	do { (void)p; } while(0)
+#define rt_lock_init(p)		do { (void)p; } while(0)
+#endif
 
 extern void		processor_bootstrap(void);
 
 extern void		processor_init(
 				processor_t	processor,
-				int		slot_num);
-extern void		timeshare_quanta_update(
-				processor_set_t	pset);
+				int		cpu_id,
+				processor_set_t	processor_set);
 
-extern void		pset_init(
-				processor_set_t	pset);
+extern void		processor_set_primary(
+				processor_t	processor,
+				processor_t	primary);
 
-#define pset_run_incr(pset)	\
-	hw_atomic_add(&(pset)->run_count, 1)
+extern kern_return_t	processor_shutdown(
+				processor_t	processor);
 
-#define pset_run_decr(pset)	\
-	hw_atomic_sub(&(pset)->run_count, 1)
+extern void		processor_queue_shutdown(
+				processor_t	processor);
 
-#define pset_share_incr(pset)	\
-	hw_atomic_add(&(pset)->share_count, 1)
+extern processor_set_t	processor_pset(
+				processor_t	processor);
 
-#define pset_share_decr(pset)	\
-	hw_atomic_sub(&(pset)->share_count, 1)
+extern pset_node_t	pset_node_root(void);
 
-extern kern_return_t	processor_shutdown(
-				processor_t	processor);
+extern processor_set_t	pset_create(
+				pset_node_t	node);
 
-extern void		pset_remove_processor(
+extern void		pset_init(
 				processor_set_t	pset,
-				processor_t	processor);
+				pset_node_t	node);
 
-extern void		pset_add_processor(
-				processor_set_t	pset,
-				processor_t	processor);
+extern processor_set_t	pset_find(
+				uint32_t	cluster_id,
+				processor_set_t	default_pset);
 
-extern void		pset_remove_task(
-				processor_set_t	pset,
-				task_t		task);
+extern kern_return_t	processor_info_count(
+				processor_flavor_t	flavor,
+				mach_msg_type_number_t	*count);
 
-extern void		pset_add_task(
-				processor_set_t	pset,
-				task_t		task);
+#define pset_deallocate(x)
+#define pset_reference(x)
 
-extern void		pset_remove_thread(
-				processor_set_t	pset,
-				thread_t	thread);
+extern void		machine_run_count(
+				uint32_t	count);
 
-extern void		pset_add_thread(
-				processor_set_t	pset,
-				thread_t	thread);
+extern processor_t	machine_choose_processor(
+				processor_set_t	pset,
+				processor_t	processor);
 
-extern void		thread_change_psets(
-				thread_t	thread,
-				processor_set_t	old_pset,
-				processor_set_t	new_pset);
+#define next_pset(p)	(((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets)
+#define PSET_THING_TASK		0
+#define PSET_THING_THREAD	1
 
-extern kern_return_t	processor_info_count(
-				processor_flavor_t	flavor,
-				mach_msg_type_number_t	*count);
+extern kern_return_t	processor_set_things(
+				processor_set_t	pset,
+				void		**thing_list,
+				mach_msg_type_number_t	*count,
+				int		type);
 
-#endif	/* MACH_KERNEL_PRIVATE */
+extern pset_cluster_type_t recommended_pset_type(thread_t thread);
+
+inline static bool
+pset_is_recommended(processor_set_t pset)
+{
+	return ((pset->recommended_bitmask & pset->cpu_bitmask) != 0);
+}
+
+extern void processor_state_update_idle(processor_t processor);
+extern void processor_state_update_from_thread(processor_t processor, thread_t thread);
+extern void processor_state_update_explicit(processor_t processor, int pri,
+		sfi_class_id_t sfi_class, pset_cluster_type_t pset_type,
+		perfcontrol_class_t perfctl_class);
+
+#else /* MACH_KERNEL_PRIVATE */
 
 __BEGIN_DECLS
 
@@ -269,4 +381,14 @@ extern void		pset_reference(
 
 __END_DECLS
 
+#endif /* MACH_KERNEL_PRIVATE */
+
+#ifdef KERNEL_PRIVATE
+__BEGIN_DECLS
+extern unsigned int		processor_count;
+extern processor_t	cpu_to_processor(int cpu);
+__END_DECLS
+
+#endif /* KERNEL_PRIVATE */
+
 #endif /* _KERN_PROCESSOR_H_ */
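
For illustration, a minimal sketch of the locking discipline stated by the new comment above ("Lock macros, always acquired and released with interrupts disabled (splsched())"), using the active_queue and processor_queue fields added in this diff together with the standard splsched()/splx() and queue_iterate() kernel primitives; the helper name count_active_processors() is hypothetical and not part of the header:

static int
count_active_processors(processor_set_t pset)
{
	processor_t	processor;
	int		count = 0;
	spl_t		s;

	s = splsched();		/* interrupts off before taking the pset sched_lock */
	pset_lock(pset);	/* simple_lock(&pset->sched_lock) on __SMP__ builds */

	queue_iterate(&pset->active_queue, processor, processor_t, processor_queue) {
		count++;
	}

	pset_unlock(pset);
	splx(s);

	return count;
}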
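The state-machine comment added in this diff notes that a processor in DISPATCHING or RUNNING state keeps current_pri (and deadline) valid so that peers can judge it as a preemption candidate. A sketch of such a check, using only the states and fields defined in this header; the helper name is hypothetical:

static boolean_t
processor_is_preemption_candidate(processor_t processor, int pri)
{
	/* Only RUNNING and DISPATCHING processors publish a meaningful current_pri */
	if (processor->state != PROCESSOR_RUNNING &&
	    processor->state != PROCESSOR_DISPATCHING)
		return FALSE;

	/* A higher-priority incoming thread makes this processor a candidate */
	return (pri > processor->current_pri) ? TRUE : FALSE;
}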
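The new next_pset() macro chains psets within a node and wraps back to node->psets at the end of the list, so a caller walking the chain needs its own termination check. A sketch of one full circle over the chain, assuming the online_processor_count field from this diff; the helper name is hypothetical:

static processor_set_t
choose_next_nonempty_pset(processor_set_t pset)
{
	processor_set_t	nset = pset;

	/* next_pset() wraps around, so stop once we return to the starting pset */
	do {
		nset = next_pset(nset);
	} while (nset->online_processor_count < 1 && nset != pset);

	return nset;
}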