/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 *
 */
#include <ddb/db_output.h>

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <sys/kdebug.h>

#include <kern/pms.h>
struct run_queue	rt_runq;
#define RT_RUNQ		((processor_t)-1)
decl_simple_lock_data(static,rt_lock);

#define		DEFAULT_PREEMPTION_RATE		100		/* (1/s) */
int			default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define		MAX_UNSAFE_QUANTA			800
int			max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define		MAX_POLL_QUANTA				2
int			max_poll_quanta = MAX_POLL_QUANTA;

#define		SCHED_POLL_YIELD_SHIFT		4		/* 1/16 */
int			sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t	max_unsafe_computation;
uint32_t	sched_safe_duration;
uint64_t	max_poll_computation;

uint32_t	std_quantum;
uint32_t	min_std_quantum;

uint32_t	std_quantum_us;

uint32_t	max_rt_quantum;
uint32_t	min_rt_quantum;

uint32_t	sched_cswtime;

uint32_t	sched_tick_interval;

uint32_t	sched_pri_shift = INT8_MAX;
uint32_t	sched_fixed_shift;

uint32_t	sched_run_count, sched_share_count;
uint32_t	sched_load_average, sched_mach_factor;

void		(*pm_tick_callout)(void) = NULL;
void		wait_queues_init(void) __attribute__((section("__TEXT, initcode")));

static void	load_shift_init(void) __attribute__((section("__TEXT, initcode")));
static void	preempt_pri_init(void) __attribute__((section("__TEXT, initcode")));

static thread_t	thread_select_idle(
					thread_t			thread,
					processor_t			processor);

static thread_t	processor_idle(
					thread_t			thread,
					processor_t			processor);

static thread_t	choose_thread(
					processor_t			processor);

static thread_t	steal_thread(
					processor_t			processor);

static void		thread_update_scan(void);

extern int debug_task;
#if	DEBUG
#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
#else
#define TLOG(a, fmt, args...) do {} while (0)
#endif
static boolean_t	thread_runnable(
						thread_t		thread);

/*
 *	State machine
 *
 * states are combinations of:
 *  R	running
 *  W	waiting (or on wait queue)
 *  N	non-interruptible
 *
 * init	action
 *	assert_wait thread_block    clear_wait		swapout	swapin
 *
 * R	RW, RWN	    R;   setrun	    -			-
 * RN	RWN	    RN;  setrun	    -			-
 */

/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */

struct wait_queue wait_queues[NUMQUEUES];

#define wait_hash(event) \
	((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)

int8_t		sched_load_shifts[NRQS];
int			sched_preempt_pri[NRQBM];
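/*
 *	Illustration (not part of the original code; names are hypothetical):
 *	the wait_hash() macro above folds an arbitrary event pointer into a
 *	bucket index so that a wakeup needs only the event to locate the
 *	queue of waiting threads, e.g.:
 *
 *		event_t      event = (event_t)&some_object;
 *		wait_queue_t wq    = &wait_queues[wait_hash(event)];
 *
 *	Negative pointer values are folded with ~ so the modulo result stays
 *	non-negative.
 */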
void
sched_init(void)
{
	/*
	 * Calculate the timeslicing quantum
	 * in us.
	 */
	if (default_preemption_rate < 1)
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	std_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard timeslicing quantum is %d us\n", std_quantum_us);

	sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) *
											(1 << SCHED_TICK_SHIFT);

	/* Initialize the tables and queues declared above. */
	wait_queues_init();
	load_shift_init();
	preempt_pri_init();
	simple_lock_init(&rt_lock, 0);
	run_queue_init(&rt_runq);
}
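/*
 *	Worked example: with the default preemption rate of 100 per second,
 *	std_quantum_us = 1000000 / 100 = 10000 us, i.e. a 10 ms standard
 *	timeslice.  sched_safe_duration then works out to 2 * 800 / 100 = 16
 *	seconds, expressed in scheduler ticks (hence the 1 << SCHED_TICK_SHIFT
 *	scale factor).
 */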
void
sched_timebase_init(void)
{
	uint64_t	abstime;
	uint32_t	shift;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
							std_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	std_quantum = abstime;

	/* smallest remaining quantum (250 us) */
	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_std_quantum = abstime;

	/* smallest rt computation (50 us) */
	clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_rt_quantum = abstime;

	/* maximum rt computation (50 ms) */
	clock_interval_to_absolutetime_interval(
							50, 1000*NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	max_rt_quantum = abstime;

	/* scheduler tick interval */
	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
													NSEC_PER_USEC, &abstime);
	sched_tick_interval = abstime;

	printf("Quantum: %d. Smallest quantum: %d. Min Rt/Max Rt: %d/%d. "
		   "Sched tick: %d.\n",
			std_quantum, min_std_quantum, min_rt_quantum, max_rt_quantum,
			sched_tick_interval);

	/*
	 *	Compute conversion factor from usage to
	 *	timesharing priorities with 5/8 ** n aging.
	 */
	abstime = (abstime * 5) / 3;
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
		abstime >>= 1;
	sched_fixed_shift = shift;

	max_unsafe_computation = max_unsafe_quanta * std_quantum;
	max_poll_computation = max_poll_quanta * std_quantum;
}
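/*
 *	Note on the loop above: it simply counts how many right shifts are
 *	needed before (sched_tick_interval * 5 / 3) drops to BASEPRI_DEFAULT
 *	or below; that count becomes sched_fixed_shift, the fixed conversion
 *	factor applied when translating accumulated usage into timesharing
 *	priority decay.
 */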
void
wait_queues_init(void)
{
	register int	i;

	for (i = 0; i < NUMQUEUES; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}

/*
 * Set up values for timeshare
 * loading factors.
 */
static void
load_shift_init(void)
{
	int8_t		k, *p = sched_load_shifts;
	uint32_t	i, j;

	*p++ = INT8_MIN; *p++ = 0;

	for (i = j = 2, k = 1; i < NRQS; ++k) {
		for (j <<= 1; i < j; ++i)
			*p++ = k;
	}
}
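/*
 *	Note: after initialization sched_load_shifts[] holds INT8_MIN for a
 *	load of 0, 0 for a load of 1, and floor(log2(n)) for n >= 2 (entries
 *	2-3 get 1, 4-7 get 2, 8-15 get 3, and so on), which is exactly what
 *	the nested doubling loop above produces.
 */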
static void
preempt_pri_init(void)
{
	int		i, *p = sched_preempt_pri;

	for (i = BASEPRI_FOREGROUND + 1; i < MINPRI_KERNEL; ++i)
		setbit(i, p);

	for (i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
		setbit(i, p);
}
/*
 *	Thread wait timer expiration.
 */
void
thread_timer_expire(
	void			*p0,
	__unused void	*p1)
{
	thread_t		thread = p0;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (--thread->wait_timer_active == 0) {
		if (thread->wait_timer_is_set) {
			thread->wait_timer_is_set = FALSE;
			clear_wait_internal(thread, THREAD_TIMED_OUT);
		}
	}
	thread_unlock(thread);
	splx(s);
}
/*
 *	thread_set_timer:
 *
 *	Set a timer for the current thread, if the thread
 *	is ready to wait.  Must be called between assert_wait()
 *	and thread_block().
 */
void
thread_set_timer(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	thread_t		thread = current_thread();
	uint64_t		deadline;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		clock_interval_to_deadline(interval, scale_factor, &deadline);
		if (!timer_call_enter(&thread->wait_timer, deadline))
			thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
	splx(s);
}
void
thread_set_timer_deadline(
	uint64_t		deadline)
{
	thread_t		thread = current_thread();
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		if (!timer_call_enter(&thread->wait_timer, deadline))
			thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
	splx(s);
}
void
thread_cancel_timer(void)
{
	thread_t		thread = current_thread();
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}
	thread_unlock(thread);
	splx(s);
}
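/*
 *	Typical caller pattern for the wait-timer interfaces above
 *	(illustrative sketch only, not code from this file; names are
 *	hypothetical):
 *
 *		assert_wait(&object->event, THREAD_UNINT);
 *		thread_set_timer(timeout_ms, 1000 * NSEC_PER_USEC);
 *		// ...drop object lock...
 *		wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
 *		thread_cancel_timer();
 *
 *	thread_set_timer() must be called between assert_wait() and
 *	thread_block(), as noted above; callers typically call
 *	thread_cancel_timer() after waking to clear any timer that has
 *	not yet fired.
 */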
/*
 *	thread_unblock:
 *
 *	Unblock thread on wake up.
 *
 *	Returns TRUE if the thread is still running.
 *
 *	Thread must be locked.
 */
boolean_t
thread_unblock(
	thread_t		thread,
	wait_result_t	wresult)
{
	boolean_t		result = FALSE;

	/*
	 *	Set wait_result.
	 */
	thread->wait_result = wresult;

	/*
	 *	Cancel pending wait timer.
	 */
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}

	/*
	 *	Update scheduling state: not waiting,
	 *	set running.
	 */
	thread->state &= ~(TH_WAIT|TH_UNINT);

	if (!(thread->state & TH_RUN)) {
		thread->state |= TH_RUN;

		(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

		if (thread->sched_mode & TH_MODE_TIMESHARE)
			sched_share_incr();
	}
	else {
		/*
		 *	Signal if idling on another processor.
		 */
		if (thread->state & TH_IDLE) {
			processor_t		processor = thread->last_processor;

			if (processor != current_processor())
				machine_signal_idle(processor);
		}

		result = TRUE;
	}

	/*
	 * Calculate deadline for real-time threads.
	 */
	if (thread->sched_mode & TH_MODE_REALTIME) {
		thread->realtime.deadline = mach_absolute_time();
		thread->realtime.deadline += thread->realtime.constraint;
	}

	/*
	 * Clear old quantum, fail-safe computation, etc.
	 */
	thread->current_quantum = 0;
	thread->computation_metered = 0;
	thread->reason = AST_NONE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
					(int)thread, (int)thread->sched_pri, 0, 0, 0);

	return (result);
}
/*
 *	Routine:	thread_go
 *	Purpose:
 *		Unblock and dispatch thread.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 *	Returns:
 *		KERN_SUCCESS - Thread was set running
 *		KERN_NOT_WAITING - Thread was not waiting
 */
kern_return_t
thread_go(
	thread_t		thread,
	wait_result_t	wresult)
{
	assert(thread->at_safe_point == FALSE);
	assert(thread->wait_event == NO_EVENT64);
	assert(thread->wait_queue == WAIT_QUEUE_NULL);

	if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
		if (!thread_unblock(thread, wresult))
			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

		return (KERN_SUCCESS);
	}

	return (KERN_NOT_WAITING);
}
/*
 *	Routine:	thread_mark_wait_locked
 *	Purpose:
 *		Mark a thread as waiting.  If, given the circumstances,
 *		it doesn't want to wait (i.e. already aborted), then
 *		indicate that in the return value.
 *	Conditions:
 *		at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
	thread_t			thread,
	wait_interrupt_t	interruptible)
{
	boolean_t		at_safe_point;

	/*
	 *	The thread may have certain types of interrupts/aborts masked
	 *	off.  Even if the wait location says these types of interrupts
	 *	are OK, we have to honor mask settings (outer-scoped code may
	 *	not be able to handle aborts at the moment).
	 */
	if (interruptible > (thread->options & TH_OPT_INTMASK))
		interruptible = thread->options & TH_OPT_INTMASK;

	at_safe_point = (interruptible == THREAD_ABORTSAFE);

	if (	interruptible == THREAD_UNINT			||
			!(thread->sched_mode & TH_MODE_ABORT)	||
			(!at_safe_point &&
				(thread->sched_mode & TH_MODE_ABORTSAFELY))) {
		thread->state |= (interruptible)? TH_WAIT: (TH_WAIT | TH_UNINT);
		thread->at_safe_point = at_safe_point;
		return (thread->wait_result = THREAD_WAITING);
	}
	else
	if (thread->sched_mode & TH_MODE_ABORTSAFELY)
		thread->sched_mode &= ~TH_MODE_ISABORTED;

	return (thread->wait_result = THREAD_INTERRUPTED);
}
/*
 *	Routine:	thread_interrupt_level
 *	Purpose:
 *		Set the maximum interruptible state for the
 *		current thread.  The effective value of any
 *		interruptible flag passed into assert_wait
 *		will never exceed this.
 *
 *		Useful for code that must not be interrupted,
 *		but which calls code that doesn't know that.
 *	Returns:
 *		The old interrupt level for the thread.
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
	wait_interrupt_t new_level)
{
	thread_t thread = current_thread();
	wait_interrupt_t result = thread->options & TH_OPT_INTMASK;

	thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);

	return result;
}
/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * this case).
 */
boolean_t
assert_wait_possible(void)
{
	thread_t thread;

	if(debug_mode) return TRUE;		/* Always succeed in debug mode */

	thread = current_thread();

	return (thread == NULL || wait_queue_assert_possible(thread));
}
/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
	event_t				event,
	wait_interrupt_t	interruptible)
{
	register wait_queue_t	wq;
	register int			index;

	assert(event != NO_EVENT);

	index = wait_hash(event);
	wq = &wait_queues[index];
	return wait_queue_assert_wait(wq, event, interruptible, 0);
}
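/*
 *	Illustrative sketch of the event protocol built on assert_wait()
 *	(hypothetical example, not from this file):
 *
 *		// waiter
 *		wait_result_t wr = assert_wait((event_t)&flag, THREAD_UNINT);
 *		if (wr == THREAD_WAITING)
 *			wr = thread_block(THREAD_CONTINUE_NULL);
 *
 *		// waker, once the condition is satisfied
 *		thread_wakeup((event_t)&flag);
 *
 *	The event is just an address; both sides must agree on it, and the
 *	waiter must re-check its condition after waking.
 */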
wait_result_t
assert_wait_timeout(
	event_t				event,
	wait_interrupt_t	interruptible,
	uint32_t			interval,
	uint32_t			scale_factor)
{
	thread_t			thread = current_thread();
	wait_result_t		wresult;
	wait_queue_t		wqueue;
	uint64_t			deadline;
	spl_t				s;

	assert(event != NO_EVENT);
	wqueue = &wait_queues[wait_hash(event)];

	s = splsched();
	wait_queue_lock(wqueue);
	thread_lock(thread);

	clock_interval_to_deadline(interval, scale_factor, &deadline);
	wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event,
													interruptible, deadline, thread);

	thread_unlock(thread);
	wait_queue_unlock(wqueue);
	splx(s);

	return (wresult);
}
wait_result_t
assert_wait_deadline(
	event_t				event,
	wait_interrupt_t	interruptible,
	uint64_t			deadline)
{
	thread_t			thread = current_thread();
	wait_result_t		wresult;
	wait_queue_t		wqueue;
	spl_t				s;

	assert(event != NO_EVENT);
	wqueue = &wait_queues[wait_hash(event)];

	s = splsched();
	wait_queue_lock(wqueue);
	thread_lock(thread);

	wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event,
													interruptible, deadline, thread);

	thread_unlock(thread);
	wait_queue_unlock(wqueue);
	splx(s);

	return (wresult);
}
/*
 *	thread_sleep_fast_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified simple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components that use a
 *	faster version of simple_lock() than is provided by usimple_lock().
 */
__private_extern__ wait_result_t
thread_sleep_fast_usimple_lock(
	event_t				event,
	simple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		simple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		simple_lock(lock);
	}
	return res;
}
/*
 *	thread_sleep_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified usimple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components where
 *	simple_lock() is defined in terms of usimple_lock().
 */
wait_result_t
thread_sleep_usimple_lock(
	event_t				event,
	usimple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		usimple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		usimple_lock(lock);
	}
	return res;
}
/*
 *	thread_sleep_mutex:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified mutex is unlocked before releasing
 *	the cpu.  The mutex will be re-acquired before returning.
 *
 *	JMM - Add hint to make sure mutex is available before rousting.
 */
wait_result_t
thread_sleep_mutex(
	event_t				event,
	mutex_t				*mutex,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		mutex_unlock(mutex);
		res = thread_block(THREAD_CONTINUE_NULL);
		mutex_lock(mutex);
	}
	return res;
}
/*
 *	thread_sleep_mutex_deadline:
 *
 *	Cause the current thread to wait until the specified event
 *	(or deadline) occurs.  The specified mutex is unlocked before
 *	releasing the cpu.  The mutex will be re-acquired before returning.
 */
wait_result_t
thread_sleep_mutex_deadline(
	event_t				event,
	mutex_t				*mutex,
	uint64_t			deadline,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		mutex_unlock(mutex);
		res = thread_block(THREAD_CONTINUE_NULL);
		mutex_lock(mutex);
	}
	return res;
}
/*
 *	thread_sleep_lock_write:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified (write) lock is unlocked before releasing
 *	the cpu.  The (write) lock will be re-acquired before returning.
 */
wait_result_t
thread_sleep_lock_write(
	event_t				event,
	lock_t				*lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lock_write_done(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		lock_write(lock);
	}
	return res;
}
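/*
 *	All of the thread_sleep_*() variants above follow the same shape:
 *	assert the wait while still holding the caller's lock (so a wakeup
 *	cannot be missed), drop the lock, block, and re-take the lock before
 *	returning the wait result to the caller.
 */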
/*
 *	thread_stop:
 *
 *	Force a preemption point for a thread and wait
 *	for it to stop running.  Arbitrates access among
 *	multiple stop requests. (released by unstop)
 *
 *	The thread must enter a wait state and stop via a
 *	separate means.
 *
 *	Returns FALSE if interrupted.
 */
boolean_t
thread_stop(
	thread_t		thread)
{
	wait_result_t	wresult;
	spl_t			s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	while (thread->state & TH_SUSP) {
		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			wresult = thread_block(THREAD_CONTINUE_NULL);

		if (wresult != THREAD_AWAKENED)
			return (FALSE);

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread->state |= TH_SUSP;

	while (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (processor != PROCESSOR_NULL && processor->active_thread == thread)
			cause_ast_check(processor);

		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			wresult = thread_block(THREAD_CONTINUE_NULL);

		if (wresult != THREAD_AWAKENED) {
			thread_unstop(thread);
			return (FALSE);
		}

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);

	return (TRUE);
}
/*
 *	thread_unstop:
 *
 *	Release a previous stop request and set
 *	the thread running if appropriate.
 *
 *	Use only after a successful stop operation.
 */
void
thread_unstop(
	thread_t		thread)
{
	spl_t		s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
		thread->state &= ~TH_SUSP;
		thread_unblock(thread, THREAD_AWAKENED);

		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	}
	else
	if (thread->state & TH_SUSP) {
		thread->state &= ~TH_SUSP;

		if (thread->wake_active) {
			thread->wake_active = FALSE;
			thread_unlock(thread);

			thread_wakeup(&thread->wake_active);
			wake_unlock(thread);
			splx(s);

			return;
		}
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
/*
 *	thread_wait:
 *
 *	Wait for a thread to stop running. (non-interruptible)
 */
void
thread_wait(
	thread_t		thread)
{
	wait_result_t	wresult;
	spl_t			s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	while (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (processor != PROCESSOR_NULL && processor->active_thread == thread)
			cause_ast_check(processor);

		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			thread_block(THREAD_CONTINUE_NULL);

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
/*
 *	Routine: clear_wait_internal
 *
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		At splsched
 *		the thread is locked.
 *	Returns:
 *		KERN_SUCCESS		thread was rousted out a wait
 *		KERN_FAILURE		thread was waiting but could not be rousted
 *		KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
	thread_t		thread,
	wait_result_t	wresult)
{
	wait_queue_t	wq = thread->wait_queue;
	int				i = LockTimeOut;

	do {
		if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
			return (KERN_FAILURE);

		if (wq != WAIT_QUEUE_NULL) {
			if (wait_queue_lock_try(wq)) {
				wait_queue_pull_thread_locked(wq, thread, TRUE);
				/* wait queue unlocked, thread still locked */
			}
			else {
				thread_unlock(thread);
				delay(1);

				thread_lock(thread);
				if (wq != thread->wait_queue)
					return (KERN_NOT_WAITING);

				continue;
			}
		}

		return (thread_go(thread, wresult));
	} while (--i > 0);

	panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
		  thread, wq, cpu_number());

	return (KERN_FAILURE);
}
/*
 *	clear_wait:
 *
 *	Clear the wait condition for the specified thread.  Start the thread
 *	executing if that is appropriate.
 *
 *	parameters:
 *	  thread		thread to awaken
 *	  result		Wakeup result the thread should see
 */
kern_return_t
clear_wait(
	thread_t		thread,
	wait_result_t	result)
{
	kern_return_t	ret;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	ret = clear_wait_internal(thread, result);
	thread_unlock(thread);
	splx(s);
	return ret;
}
/*
 *	thread_wakeup_prim:
 *
 *	Common routine for thread_wakeup, thread_wakeup_with_result,
 *	and thread_wakeup_one.
 */
kern_return_t
thread_wakeup_prim(
	event_t			event,
	boolean_t		one_thread,
	wait_result_t	result)
{
	register wait_queue_t	wq;
	register int			index;

	index = wait_hash(event);
	wq = &wait_queues[index];
	if (one_thread)
		return (wait_queue_wakeup_one(wq, event, result));
	else
		return (wait_queue_wakeup_all(wq, event, result));
}
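/*
 *	Note: thread_wakeup(), thread_wakeup_with_result() and
 *	thread_wakeup_one() are thin wrappers declared in the corresponding
 *	header (sched_prim.h) that invoke thread_wakeup_prim() with the
 *	appropriate one_thread/result arguments, as the comment above
 *	indicates.
 */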
/*
 *	thread_bind:
 *
 *	Force the current thread to execute on the specified processor.
 *
 *	Returns the previous binding.  PROCESSOR_NULL means
 *	not bound.
 *
 *	XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
	processor_t		processor)
{
	thread_t		self = current_thread();
	processor_t		prev;
	spl_t			s;

	s = splsched();
	thread_lock(self);

	prev = self->bound_processor;
	self->bound_processor = processor;

	thread_unlock(self);
	splx(s);

	return (prev);
}
/*
 *	thread_select:
 *
 *	Select a new thread for the current processor to execute.
 *
 *	May select the current thread, which must be locked.
 */
static thread_t
thread_select(
	thread_t			thread,
	processor_t			processor)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			new_thread;
	boolean_t			other_runnable;

	do {
		/*
		 *	Update the priority.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		processor->current_pri = thread->sched_pri;

		pset_lock(pset);

		simple_lock(&rt_lock);

		/*
		 *	Check for other runnable threads.
		 */
		other_runnable = processor->runq.count > 0 || rt_runq.count > 0;

		/*
		 *	Test to see if the current thread should continue
		 *	to run on this processor.  Must be runnable, and not
		 *	bound to a different processor, nor be in the wrong
		 *	processor set.
		 */
		if (	thread->state == TH_RUN							&&
				(thread->bound_processor == PROCESSOR_NULL		||
				 thread->bound_processor == processor)			&&
				(thread->affinity_set == AFFINITY_SET_NULL		||
				 thread->affinity_set->aset_pset == pset)			) {
			if (	thread->sched_pri >= BASEPRI_RTQUEUES	&&
					first_timeslice(processor)				) {
				if (rt_runq.highq >= BASEPRI_RTQUEUES) {
					register run_queue_t	runq = &rt_runq;
					register queue_t		q;

					q = runq->queues + runq->highq;
					if (((thread_t)q->next)->realtime.deadline <
													processor->deadline) {
						thread = (thread_t)q->next;
						((queue_entry_t)thread)->next->prev = q;
						q->next = ((queue_entry_t)thread)->next;
						thread->runq = PROCESSOR_NULL;
						runq->count--; runq->urgency--;
						assert(runq->urgency >= 0);
						if (queue_empty(q)) {
							if (runq->highq != IDLEPRI)
								clrbit(MAXPRI - runq->highq, runq->bitmap);
							runq->highq = MAXPRI - ffsbit(runq->bitmap);
						}
					}
				}

				simple_unlock(&rt_lock);

				processor->deadline = thread->realtime.deadline;

				pset_unlock(pset);

				return (thread);
			}

			if (	!other_runnable							||
					(processor->runq.highq < thread->sched_pri	&&
					 rt_runq.highq < thread->sched_pri)			) {
				simple_unlock(&rt_lock);

				/* I am the highest priority runnable (non-idle) thread */

				pset_hint_low(pset, processor);
				pset_hint_high(pset, processor);

				processor->deadline = UINT64_MAX;

				pset_unlock(pset);

				return (thread);
			}
		}

		if (other_runnable)
			return choose_thread(processor);

		simple_unlock(&rt_lock);

		/*
		 *	No runnable threads, attempt to steal
		 *	from other processors.
		 */
		if (pset->high_hint != PROCESSOR_NULL && pset->high_hint->runq.count > 0) {
			new_thread = steal_thread(pset->high_hint);
			if (new_thread != THREAD_NULL) {
				pset_unlock(pset);

				return (new_thread);
			}
		}

		/*
		 *	Nothing is runnable, so set this processor idle if it
		 *	was running.
		 */
		if (processor->state == PROCESSOR_RUNNING) {
			remqueue(&pset->active_queue, (queue_entry_t)processor);
			processor->state = PROCESSOR_IDLE;

			enqueue_head(&pset->idle_queue, (queue_entry_t)processor);
			pset->low_hint = processor;
		}

		processor->deadline = UINT64_MAX;

		pset_unlock(pset);

		/*
		 *	Choose idle thread if fast idle is not possible.
		 */
		if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active)
			return (processor->idle_thread);

		/*
		 *	Perform idling activities directly without a
		 *	context switch.  Return dispatched thread,
		 *	else check again for a runnable thread.
		 */
		new_thread = thread_select_idle(thread, processor);

	} while (new_thread == THREAD_NULL);

	return (new_thread);
}
/*
 *	thread_select_idle:
 *
 *	Idle the processor using the current thread context.
 *
 *	Called with thread locked, then dropped and relocked.
 */
static thread_t
thread_select_idle(
	thread_t		thread,
	processor_t		processor)
{
	thread_t		new_thread;

	if (thread->sched_mode & TH_MODE_TIMESHARE)
		sched_share_decr();

	thread->state |= TH_IDLE;
	processor->current_pri = IDLEPRI;

	thread_unlock(thread);

	/*
	 *	Switch execution timing to processor idle thread.
	 */
	processor->last_dispatch = mach_absolute_time();
	thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;

	/*
	 *	Cancel the quantum timer while idling.
	 */
	timer_call_cancel(&processor->quantum_timer);
	processor->timeslice = 0;

	(*thread->sched_call)(SCHED_CALL_BLOCK, thread);

	/*
	 *	Enable interrupts and perform idling activities.  No
	 *	preemption due to TH_IDLE being set.
	 */
	spllo(); new_thread = processor_idle(thread, processor);

	(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

	thread_lock(thread);

	/*
	 *	If awakened, switch to thread timer and start a new quantum.
	 *	Otherwise skip; we will context switch to another thread or return here.
	 */
	if (!(thread->state & TH_WAIT)) {
		processor->last_dispatch = mach_absolute_time();
		thread_timer_event(processor->last_dispatch, &thread->system_timer);
		PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

		thread_quantum_init(thread);

		processor->quantum_end = processor->last_dispatch + thread->current_quantum;
		timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end);
		processor->timeslice = 1;

		thread->computation_epoch = processor->last_dispatch;
	}

	thread->state &= ~TH_IDLE;

	if (thread->sched_mode & TH_MODE_TIMESHARE)
		sched_share_incr();

	return (new_thread);
}
/*
 *	thread_invoke
 *
 *	Perform a context switch and start executing the new thread.
 *
 *	Returns FALSE on failure, and the thread is re-dispatched.
 *
 *	Called at splsched.
 */

#define funnel_release_check(thread, debug)					\
MACRO_BEGIN													\
	if ((thread)->funnel_state & TH_FN_OWNED) {				\
		(thread)->funnel_state = TH_FN_REFUNNEL;			\
		KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,				\
			(thread)->funnel_lock, (debug), 0, 0, 0);			\
		funnel_unlock((thread)->funnel_lock);					\
	}														\
MACRO_END

#define funnel_refunnel_check(thread, debug)				\
MACRO_BEGIN													\
	if ((thread)->funnel_state & TH_FN_REFUNNEL) {			\
		kern_return_t	result = (thread)->wait_result;		\
															\
		(thread)->funnel_state = 0;							\
		KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,				\
			(thread)->funnel_lock, (debug), 0, 0, 0);			\
		funnel_lock((thread)->funnel_lock);					\
		KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE,				\
			(thread)->funnel_lock, (debug), 0, 0, 0);			\
		(thread)->funnel_state = TH_FN_OWNED;				\
		(thread)->wait_result = result;						\
	}														\
MACRO_END
static boolean_t
thread_invoke(
	register thread_t	self,
	register thread_t	thread,
	ast_t				reason)
{
	thread_continue_t	continuation = self->continuation;
	void				*parameter = self->parameter;
	processor_t			processor;

	if (get_preemption_level() != 0)
		panic("thread_invoke: preemption_level %d\n",
				get_preemption_level());

	assert(self == current_thread());

	/*
	 * Mark thread interruptible.
	 */
	thread_lock(thread);
	thread->state &= ~TH_UNINT;

	assert(thread_runnable(thread));

	/*
	 * Allow time constraint threads to hang onto
	 * a stack.
	 */
	if ((self->sched_mode & TH_MODE_REALTIME) && !self->reserved_stack)
		self->reserved_stack = self->kernel_stack;

	if (continuation != NULL) {
		if (!thread->kernel_stack) {
			/*
			 * If we are using a privileged stack,
			 * check to see whether we can exchange it with
			 * that of the other thread.
			 */
			if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
				goto need_stack;

			/*
			 * Context switch by performing a stack handoff.
			 */
			continuation = thread->continuation;
			parameter = thread->parameter;

			processor = current_processor();
			processor->active_thread = thread;
			processor->current_pri = thread->sched_pri;
			if (thread->last_processor != processor && thread->last_processor != NULL) {
				if (thread->last_processor->processor_set != processor->processor_set)
					thread->ps_switch++;
			}
			thread->last_processor = processor;
			ast_context(thread);
			thread_unlock(thread);

			self->reason = reason;

			processor->last_dispatch = mach_absolute_time();
			thread_timer_event(processor->last_dispatch, &thread->system_timer);
			PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
							self->reason, (int)thread, self->sched_pri, thread->sched_pri, 0);

			TLOG(1, "thread_invoke: calling machine_stack_handoff\n");
			machine_stack_handoff(self, thread);

			thread_dispatch(self, thread);

			thread->continuation = thread->parameter = NULL;

			counter(c_thread_invoke_hits++);

			funnel_refunnel_check(thread, 2);

			assert(continuation);
			call_continuation(continuation, parameter, thread->wait_result);
			/*NOTREACHED*/
		}
		else if (thread == self) {
			/* same thread but with continuation */
			counter(++c_thread_invoke_same);
			thread_unlock(self);

			self->continuation = self->parameter = NULL;

			funnel_refunnel_check(self, 3);

			call_continuation(continuation, parameter, self->wait_result);
			/*NOTREACHED*/
		}
	}
	else {
		/*
		 * Check that the other thread has a stack
		 */
		if (!thread->kernel_stack) {
need_stack:
			if (!stack_alloc_try(thread)) {
				counter(c_thread_invoke_misses++);
				thread_unlock(thread);
				thread_stack_enqueue(thread);
				return (FALSE);
			}
		}
		else if (thread == self) {
			counter(++c_thread_invoke_same);
			thread_unlock(self);
			return (TRUE);
		}
	}

	/*
	 * Context switch by full context save.
	 */
	processor = current_processor();
	processor->active_thread = thread;
	processor->current_pri = thread->sched_pri;
	if (thread->last_processor != processor && thread->last_processor != NULL) {
		if (thread->last_processor->processor_set != processor->processor_set)
			thread->ps_switch++;
	}
	thread->last_processor = processor;
	ast_context(thread);
	thread_unlock(thread);

	counter(c_thread_invoke_csw++);

	assert(self->runq == PROCESSOR_NULL);
	self->reason = reason;

	processor->last_dispatch = mach_absolute_time();
	thread_timer_event(processor->last_dispatch, &thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
					(int)self->reason, (int)thread, self->sched_pri, thread->sched_pri, 0);

	/*
	 * This is where we actually switch register context,
	 * and address space if required.  We will next run
	 * as a result of a subsequent context switch.
	 */
	thread = machine_switch_context(self, continuation, thread);
	TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);

	/*
	 * We have been resumed and are set to run.
	 */
	thread_dispatch(thread, self);

	if (continuation) {
		self->continuation = self->parameter = NULL;

		funnel_refunnel_check(self, 3);

		call_continuation(continuation, parameter, self->wait_result);
		/*NOTREACHED*/
	}

	return (TRUE);
}
/*
 *	thread_dispatch:
 *
 *	Handle threads at context switch.  Re-dispatch other thread
 *	if still running, otherwise update run state and perform
 *	special actions.  Update quantum for other thread and begin
 *	the quantum for ourselves.
 *
 *	Called at splsched.
 */
void
thread_dispatch(
	thread_t		thread,
	thread_t		self)
{
	processor_t		processor = self->last_processor;

	if (thread != THREAD_NULL) {
		/*
		 *	If blocked at a continuation, discard
		 *	the stack.
		 */
		if (thread->continuation != NULL && thread->kernel_stack != 0)
			stack_free(thread);

		if (!(thread->state & TH_IDLE)) {
			wake_lock(thread);
			thread_lock(thread);

			/*
			 *	Compute remainder of current quantum.
			 */
			if (	first_timeslice(processor)							&&
					processor->quantum_end > processor->last_dispatch		)
				thread->current_quantum = (processor->quantum_end - processor->last_dispatch);
			else
				thread->current_quantum = 0;

			if (thread->sched_mode & TH_MODE_REALTIME) {
				/*
				 *	Cancel the deadline if the thread has
				 *	consumed the entire quantum.
				 */
				if (thread->current_quantum == 0) {
					thread->realtime.deadline = UINT64_MAX;
					thread->reason |= AST_QUANTUM;
				}
			}
			else {
				/*
				 *	For non-realtime threads treat a tiny
				 *	remaining quantum as an expired quantum
				 *	but include what's left next time.
				 */
				if (thread->current_quantum < min_std_quantum) {
					thread->reason |= AST_QUANTUM;
					thread->current_quantum += std_quantum;
				}
			}

			/*
			 *	If we are doing a direct handoff then
			 *	take the remainder of the quantum.
			 */
			if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
				self->current_quantum = thread->current_quantum;
				thread->reason |= AST_QUANTUM;
				thread->current_quantum = 0;
			}

			thread->last_switch = processor->last_dispatch;

			thread->computation_metered += (thread->last_switch - thread->computation_epoch);

			if (!(thread->state & TH_WAIT)) {
				/*
				 *	Still running.
				 */
				if (thread->reason & AST_QUANTUM)
					thread_setrun(thread, SCHED_TAILQ);
				else
				if (thread->reason & AST_PREEMPT)
					thread_setrun(thread, SCHED_HEADQ);
				else
					thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

				thread->reason = AST_NONE;

				thread_unlock(thread);
				wake_unlock(thread);
			}
			else {
				/*
				 *	Waiting.
				 */
				thread->state &= ~TH_RUN;

				if (thread->sched_mode & TH_MODE_TIMESHARE)
					sched_share_decr();

				if (thread->wake_active) {
					thread->wake_active = FALSE;
					thread_unlock(thread);

					thread_wakeup(&thread->wake_active);
				}
				else
					thread_unlock(thread);

				wake_unlock(thread);

				(*thread->sched_call)(SCHED_CALL_BLOCK, thread);

				if (thread->state & TH_TERMINATE)
					thread_terminate_enqueue(thread);
			}
		}
	}

	if (!(self->state & TH_IDLE)) {
		/*
		 *	Get a new quantum if none remaining.
		 */
		if (self->current_quantum == 0)
			thread_quantum_init(self);

		/*
		 *	Set up quantum timer and timeslice.
		 */
		processor->quantum_end = (processor->last_dispatch + self->current_quantum);
		timer_call_enter1(&processor->quantum_timer, self, processor->quantum_end);

		processor->timeslice = 1;

		self->last_switch = processor->last_dispatch;

		self->computation_epoch = self->last_switch;
	}
	else {
		timer_call_cancel(&processor->quantum_timer);
		processor->timeslice = 0;
	}
}
/*
 *	thread_block_reason:
 *
 *	Forces a reschedule, blocking the caller if a wait
 *	has been asserted.
 *
 *	If a continuation is specified, then thread_invoke will
 *	attempt to discard the thread's kernel stack.  When the
 *	thread resumes, it will execute the continuation function
 *	on a new kernel stack.
 */
counter(mach_counter_t  c_thread_block_calls = 0;)

wait_result_t
thread_block_reason(
	thread_continue_t	continuation,
	void				*parameter,
	ast_t				reason)
{
	register thread_t		self = current_thread();
	register processor_t	processor;
	register thread_t		new_thread;
	spl_t					s;

	counter(++c_thread_block_calls);

	s = splsched();

	if (!(reason & AST_PREEMPT))
		funnel_release_check(self, 2);

	processor = current_processor();

	/* If we're explicitly yielding, force a subsequent quantum */
	if (reason & AST_YIELD)
		processor->timeslice = 0;

	/* We're handling all scheduling AST's */
	ast_off(AST_SCHEDULING);

	self->continuation = continuation;
	self->parameter = parameter;

	do {
		thread_lock(self);
		new_thread = thread_select(self, processor);
		thread_unlock(self);
	} while (!thread_invoke(self, new_thread, reason));

	funnel_refunnel_check(self, 5);
	splx(s);

	return (self->wait_result);
}

/*
 *	thread_block:
 *
 *	Block the current thread if a wait has been asserted.
 */
wait_result_t
thread_block(
	thread_continue_t	continuation)
{
	return thread_block_reason(continuation, NULL, AST_NONE);
}

wait_result_t
thread_block_parameter(
	thread_continue_t	continuation,
	void				*parameter)
{
	return thread_block_reason(continuation, parameter, AST_NONE);
}
/*
 *	thread_run:
 *
 *	Switch directly from the current thread to the
 *	new thread, handing off our quantum if appropriate.
 *
 *	New thread must be runnable, and not on a run queue.
 *
 *	Called at splsched.
 */
int
thread_run(
	thread_t			self,
	thread_continue_t	continuation,
	void				*parameter,
	thread_t			new_thread)
{
	ast_t		handoff = AST_HANDOFF;

	funnel_release_check(self, 3);

	self->continuation = continuation;
	self->parameter = parameter;

	while (!thread_invoke(self, new_thread, handoff)) {
		processor_t		processor = current_processor();

		thread_lock(self);
		new_thread = thread_select(self, processor);
		thread_unlock(self);
	}

	funnel_refunnel_check(self, 6);

	return (self->wait_result);
}

/*
 *	thread_continue:
 *
 *	Called at splsched when a thread first receives
 *	a new stack after a continuation.
 */
void
thread_continue(
	register thread_t	thread)
{
	register thread_t			self = current_thread();
	register thread_continue_t	continuation;
	register void				*parameter;

	continuation = self->continuation;
	parameter = self->parameter;

	thread_dispatch(thread, self);

	self->continuation = self->parameter = NULL;

	funnel_refunnel_check(self, 4);

	if (thread != THREAD_NULL)
		(void)spllo();

	TLOG(1, "thread_continue: calling call_continuation \n");
	call_continuation(continuation, parameter, self->wait_result);
	/*NOTREACHED*/
}
/*
 *	run_queue_init:
 *
 *	Initialize a run queue before first use.
 */
void
run_queue_init(
	run_queue_t		rq)
{
	int				i;

	rq->highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		rq->bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, rq->bitmap);
	rq->urgency = rq->count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&rq->queues[i]);
}
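/*
 *	Note on the run queue representation: there is one FIFO per priority
 *	plus a bitmap of occupied priorities.  Bit (MAXPRI - pri) is set when
 *	queue pri is non-empty, and highq caches the highest occupied
 *	priority so a dispatcher can find it with a single ffsbit() scan.
 *	run_queue_init() seeds the structure with only the IDLEPRI bit set
 *	and highq == IDLEPRI.
 */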
/*
 *	run_queue_dequeue:
 *
 *	Perform a dequeue operation on a run queue,
 *	and return the resulting thread.
 *
 *	The run queue must be locked (see run_queue_remove()
 *	for more info), and not empty.
 */
static thread_t
run_queue_dequeue(
	run_queue_t		rq,
	integer_t		options)
{
	thread_t		thread;
	queue_t			queue = rq->queues + rq->highq;

	if (options & SCHED_HEADQ) {
		thread = (thread_t)queue->next;
		((queue_entry_t)thread)->next->prev = queue;
		queue->next = ((queue_entry_t)thread)->next;
	}
	else {
		thread = (thread_t)queue->prev;
		((queue_entry_t)thread)->prev->next = queue;
		queue->prev = ((queue_entry_t)thread)->prev;
	}

	thread->runq = PROCESSOR_NULL;
	rq->count--;
	if (testbit(rq->highq, sched_preempt_pri)) {
		rq->urgency--; assert(rq->urgency >= 0);
	}
	if (queue_empty(queue)) {
		if (rq->highq != IDLEPRI)
			clrbit(MAXPRI - rq->highq, rq->bitmap);
		rq->highq = MAXPRI - ffsbit(rq->bitmap);
	}

	return (thread);
}
/*
 *	realtime_queue_insert:
 *
 *	Enqueue a thread for realtime execution.
 */
static boolean_t
realtime_queue_insert(
	thread_t			thread)
{
	run_queue_t			rq = &rt_runq;
	queue_t				queue = rq->queues + thread->sched_pri;
	uint64_t			deadline = thread->realtime.deadline;
	boolean_t			preempt = FALSE;

	simple_lock(&rt_lock);

	if (queue_empty(queue)) {
		enqueue_tail(queue, (queue_entry_t)thread);

		setbit(MAXPRI - thread->sched_pri, rq->bitmap);
		if (thread->sched_pri > rq->highq)
			rq->highq = thread->sched_pri;
		preempt = TRUE;
	}
	else {
		register thread_t	entry = (thread_t)queue_first(queue);

		while (TRUE) {
			if (	queue_end(queue, (queue_entry_t)entry)	||
					deadline < entry->realtime.deadline		) {
				entry = (thread_t)queue_prev((queue_entry_t)entry);
				break;
			}

			entry = (thread_t)queue_next((queue_entry_t)entry);
		}

		if ((queue_entry_t)entry == queue)
			preempt = TRUE;

		insque((queue_entry_t)thread, (queue_entry_t)entry);
	}

	thread->runq = RT_RUNQ;
	rq->count++; rq->urgency++;

	simple_unlock(&rt_lock);

	return (preempt);
}
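/*
 *	Note: within a single real-time priority level the queue is kept in
 *	non-decreasing deadline order, so the scan above walks forward until
 *	it finds a later deadline and inserts in front of it; a preemption is
 *	requested only when the new thread lands at the head of the queue.
 */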
/*
 *	realtime_setrun:
 *
 *	Dispatch a thread for realtime execution.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
realtime_setrun(
	processor_t			processor,
	thread_t			thread)
{
	processor_set_t		pset = processor->processor_set;

	/*
	 *	Dispatch directly onto idle processor.
	 */
	if (processor->state == PROCESSOR_IDLE) {
		remqueue(&pset->idle_queue, (queue_entry_t)processor);
		enqueue_head(&pset->active_queue, (queue_entry_t)processor);

		processor->next_thread = thread;
		processor->deadline = thread->realtime.deadline;
		processor->state = PROCESSOR_DISPATCHING;
		pset_unlock(pset);

		if (processor != current_processor())
			machine_signal_idle(processor);
		return;
	}

	if (realtime_queue_insert(thread)) {
		if (processor == current_processor())
			ast_on(AST_PREEMPT | AST_URGENT);
		else
			cause_ast_check(processor);
	}

	pset_unlock(pset);
}
/*
 *	processor_enqueue:
 *
 *	Enqueue thread on a processor run queue.  Thread must be locked,
 *	and not already be on a run queue.
 *
 *	Returns TRUE if a preemption is indicated based on the state
 *	of the run queue.
 *
 *	The run queue must be locked (see run_queue_remove()
 *	for more info).
 */
static boolean_t
processor_enqueue(
	processor_t		processor,
	thread_t		thread,
	integer_t		options)
{
	run_queue_t		rq = &processor->runq;
	queue_t			queue = rq->queues + thread->sched_pri;
	boolean_t		result = FALSE;

	if (queue_empty(queue)) {
		enqueue_tail(queue, (queue_entry_t)thread);

		setbit(MAXPRI - thread->sched_pri, rq->bitmap);
		if (thread->sched_pri > rq->highq) {
			rq->highq = thread->sched_pri;
			result = TRUE;
		}
	}
	else {
		if (options & SCHED_TAILQ)
			enqueue_tail(queue, (queue_entry_t)thread);
		else
			enqueue_head(queue, (queue_entry_t)thread);
	}

	thread->runq = processor;
	if (testbit(thread->sched_pri, sched_preempt_pri))
		rq->urgency++;
	rq->count++;

	return (result);
}
/*
 *	processor_setrun:
 *
 *	Dispatch a thread for execution on a
 *	processor.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
processor_setrun(
	processor_t			processor,
	thread_t			thread,
	integer_t			options)
{
	processor_set_t		pset = processor->processor_set;
	ast_t				preempt;

	/*
	 *	Dispatch directly onto idle processor.
	 */
	if (processor->state == PROCESSOR_IDLE) {
		remqueue(&pset->idle_queue, (queue_entry_t)processor);
		enqueue_head(&pset->active_queue, (queue_entry_t)processor);

		processor->next_thread = thread;
		processor->deadline = UINT64_MAX;
		processor->state = PROCESSOR_DISPATCHING;
		pset_unlock(pset);

		if (processor != current_processor())
			machine_signal_idle(processor);
		return;
	}

	/*
	 *	Set preemption mode.
	 */
	if (testbit(thread->sched_pri, sched_preempt_pri))
		preempt = (AST_PREEMPT | AST_URGENT);
	else
	if (thread->sched_mode & TH_MODE_TIMESHARE && thread->priority < BASEPRI_BACKGROUND)
		preempt = AST_NONE;
	else
		preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;

	if (!processor_enqueue(processor, thread, options))
		preempt = AST_NONE;

	pset_hint_high(pset, processor);

	if (preempt != AST_NONE) {
		if (processor == current_processor()) {
			thread_t	self = processor->active_thread;

			if (csw_needed(self, processor))
				ast_on(preempt);
		}
		else
		if (	(processor->state == PROCESSOR_RUNNING		||
				 processor->state == PROCESSOR_SHUTDOWN)	&&
				thread->sched_pri >= processor->current_pri	) {
			cause_ast_check(processor);
		}
	}
	else
	if (	processor->state == PROCESSOR_SHUTDOWN		&&
			thread->sched_pri >= processor->current_pri	) {
		cause_ast_check(processor);
	}

	pset_unlock(pset);
}
#define next_pset(p)	(((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets)

/*
 *	choose_next_pset:
 *
 *	Return the next sibling pset containing
 *	available processors.
 *
 *	Returns the original pset if none other is
 *	suitable.
 */
static processor_set_t
choose_next_pset(
	processor_set_t		pset)
{
	processor_set_t		nset = pset;

	do {
		nset = next_pset(nset);
	} while (nset->processor_count < 1 && nset != pset);

	return ((nset != pset)? nset: pset);
}
/*
 *	choose_processor:
 *
 *	Choose a processor for the thread, beginning at
 *	the pset.
 *
 *	Returns a processor, possibly from a different pset.
 *
 *	The thread must be locked.  The pset must be locked,
 *	and the resulting pset is locked on return.
 */
static processor_t
choose_processor(
	processor_set_t		pset,
	thread_t			thread)
{
	processor_set_t		nset, cset = pset;
	processor_t			processor;

	/*
	 *	Iterate through the processor sets to locate
	 *	an appropriate processor.
	 */
	do {
		/*
		 *	Choose an idle processor.
		 */
		if (!queue_empty(&cset->idle_queue))
			return ((processor_t)queue_first(&cset->idle_queue));

		if (thread->sched_pri >= BASEPRI_RTQUEUES) {
			/*
			 *	For an RT thread, iterate through active processors, first fit.
			 */
			processor = (processor_t)queue_first(&cset->active_queue);
			while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
				if (thread->sched_pri > processor->current_pri ||
						thread->realtime.deadline < processor->deadline)
					return (processor);

				processor = (processor_t)queue_next((queue_entry_t)processor);
			}
		}
		else {
			/*
			 *	Choose the low hint processor in the processor set if available.
			 */
			processor = cset->low_hint;
			if (processor != PROCESSOR_NULL &&
					processor->state != PROCESSOR_SHUTDOWN && processor->state != PROCESSOR_OFF_LINE)
				return (processor);

			/*
			 *	Choose any active processor if the hint was invalid.
			 */
			processor = (processor_t)dequeue_head(&cset->active_queue);
			if (processor != PROCESSOR_NULL) {
				enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
				return (processor);
			}
		}

		/*
		 *	Move onto the next processor set.
		 */
		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	/*
	 *	If all else fails choose the current processor,
	 *	this routine must return a running processor.
	 */
	processor = current_processor();
	if (cset != processor->processor_set) {
		pset_unlock(cset);

		cset = processor->processor_set;
		pset_lock(cset);
	}

	return (processor);
}
/*
 *	thread_setrun:
 *
 *	Dispatch thread for execution, onto an idle
 *	processor or run queue, and signal a preemption
 *	as appropriate.
 *
 *	Thread must be locked.
 */
void
thread_setrun(
	thread_t			thread,
	integer_t			options)
{
	processor_t			processor;
	processor_set_t		pset;

	assert(thread_runnable(thread));

	/*
	 *	Update priority if needed.
	 */
	if (thread->sched_stamp != sched_tick)
		update_priority(thread);

	assert(thread->runq == PROCESSOR_NULL);

	if (thread->bound_processor == PROCESSOR_NULL) {
		/*
		 *	Unbound case.
		 */
		if (thread->affinity_set != AFFINITY_SET_NULL) {
			/*
			 * Use affinity set policy hint.
			 */
			pset = thread->affinity_set->aset_pset;
			pset_lock(pset);

			processor = choose_processor(pset, thread);
		}
		else
		if (thread->last_processor != PROCESSOR_NULL) {
			/*
			 *	Simple (last processor) affinity case.
			 */
			processor = thread->last_processor;
			pset = processor->processor_set;
			pset_lock(pset);

			/*
			 *	Choose a different processor in certain cases.
			 */
			if (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
				processor = choose_processor(pset, thread);
			else
			if (thread->sched_pri >= BASEPRI_RTQUEUES) {
				/*
				 *	If the processor is executing an RT thread with
				 *	an earlier deadline, choose another.
				 */
				if (thread->sched_pri <= processor->current_pri ||
						thread->realtime.deadline >= processor->deadline)
					processor = choose_processor(pset, thread);
			}
			else
			if (processor->state != PROCESSOR_IDLE && pset->idle_count > 0) {
				processor = choose_processor(pset, thread);
			}
			else {
				processor_set_t		nset = choose_next_pset(pset);

				/*
				 *	Bump into a lesser loaded processor set if appropriate.
				 */
				if (pset != nset && (nset->low_hint == PROCESSOR_NULL ||
						(pset->idle_count == 0 && nset->idle_count > 0) ||
							processor->runq.count > nset->low_hint->runq.count)) {
					pset_unlock(pset);

					pset = nset;
					pset_lock(pset);

					processor = choose_processor(pset, thread);
				}
			}
		}
		else {
			/*
			 *	No affinity hint:
			 *
			 *	Choose a processor from the current processor set.
			 */
			processor = current_processor();
			pset = processor->processor_set;
			pset_lock(pset);

			processor = choose_processor(pset, thread);
		}
	}
	else {
		/*
		 *	Bound case:
		 *
		 *	Unconditionally dispatch on the processor.
		 */
		processor = thread->bound_processor;
		pset = processor->processor_set;
		pset_lock(pset);
	}

	/*
	 *	Dispatch the thread on the chosen processor.
	 */
	if (thread->sched_pri >= BASEPRI_RTQUEUES)
		realtime_setrun(processor, thread);
	else
		processor_setrun(processor, thread, options);
}
/*
 *	processor_queue_shutdown:
 *
 *	Shutdown a processor run queue by moving
 *	non-bound threads to the current processor.
 *
 *	Associated pset must be locked, and is
 *	returned unlocked.
 */
void
processor_queue_shutdown(
	processor_t			processor)
{
	processor_set_t		pset = processor->processor_set;
	run_queue_t			rq = &processor->runq;
	queue_t				queue = rq->queues + rq->highq;
	int					pri = rq->highq, count = rq->count;
	thread_t			next, thread;
	queue_head_t		tqueue;

	queue_init(&tqueue);

	while (count > 0) {
		thread = (thread_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			next = (thread_t)queue_next((queue_entry_t)thread);

			if (thread->bound_processor != processor) {
				remqueue(queue, (queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				rq->count--;
				if (testbit(pri, sched_preempt_pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				enqueue_tail(&tqueue, (queue_entry_t)thread);
			}
			count--;

			thread = next;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	processor = current_processor();
	pset = processor->processor_set;

	while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);
		thread->last_processor = PROCESSOR_NULL;

		pset_lock(pset);

		processor_enqueue(processor, thread, SCHED_TAILQ);

		pset_unlock(pset);

		thread_unlock(thread);
	}
}
/*
 *	csw_check:
 *
 *	Check for a possible preemption point in
 *	the (current) thread.
 *
 *	Called at splsched.
 */
ast_t
csw_check(
	thread_t		thread,
	processor_t		processor)
{
	int				current_pri = thread->sched_pri;
	ast_t			result = AST_NONE;
	run_queue_t		runq = &rt_runq;

	if (first_timeslice(processor)) {
		if (runq->highq >= BASEPRI_RTQUEUES)
			return (AST_PREEMPT | AST_URGENT);

		if (runq->highq > current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}

		runq = &processor->runq;
		if (runq->highq > current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}
	}
	else {
		if (runq->highq >= current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}

		runq = &processor->runq;
		if (runq->highq >= current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}
	}

	if (result != AST_NONE)
		return (result);

	if (thread->state & TH_SUSP)
		result |= AST_PREEMPT;

	return (result);
}
/*
 *	set_sched_pri:
 *
 *	Set the scheduled priority of the specified thread.
 *
 *	This may cause the thread to change queues.
 *
 *	Thread must be locked.
 */
void
set_sched_pri(
	thread_t		thread,
	int				priority)
{
	boolean_t		removed = run_queue_remove(thread);

	thread->sched_pri = priority;
	if (removed)
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	else
	if (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (thread == current_thread()) {
			ast_t		preempt = csw_check(thread, processor);

			if (preempt != AST_NONE)
				ast_on(preempt);
			processor->current_pri = priority;
		}
		else
		if (	processor != PROCESSOR_NULL			&&
				processor->active_thread == thread	)
			cause_ast_check(processor);
	}
}
/*
 *	run_queue_check:
 *
 *	Debug sanity check that a thread is on the given run queue.
 */
static void
run_queue_check(
	run_queue_t		rq,
	thread_t		thread)
{
	queue_t			q;
	queue_entry_t	qe;

	if (rq != thread->runq)
		panic("run_queue_check: thread runq");

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
		panic("run_queue_check: thread sched_pri");

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread)
			return;

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}
/*
 *	run_queue_remove:
 *
 *	Remove a thread from a current run queue and
 *	return TRUE if successful.
 *
 *	Thread must be locked.
 */
boolean_t
run_queue_remove(
	thread_t		thread)
{
	processor_t		processor = thread->runq;

	/*
	 *	If processor is PROCESSOR_NULL, the thread will stay out of the
	 *	run queues because the caller locked the thread.  Otherwise
	 *	the thread is on a run queue, but could be chosen for dispatch
	 *	and removed.
	 */
	if (processor != PROCESSOR_NULL) {
		void			*rqlock;
		run_queue_t		rq;

		/*
		 *	The processor run queues are locked by the
		 *	processor set.  Real-time priorities use a
		 *	global queue with a dedicated lock.
		 */
		if (thread->sched_pri < BASEPRI_RTQUEUES) {
			rqlock = &processor->processor_set->sched_lock;
			rq = &processor->runq;
		}
		else {
			rqlock = &rt_lock; rq = &rt_runq;
		}

		simple_lock(rqlock);

		if (processor == thread->runq) {
			/*
			 *	Thread is on a run queue and we have a lock on
			 *	that run queue.
			 */
			remqueue(&rq->queues[0], (queue_entry_t)thread);
			rq->count--;
			if (testbit(thread->sched_pri, sched_preempt_pri)) {
				rq->urgency--; assert(rq->urgency >= 0);
			}

			if (queue_empty(rq->queues + thread->sched_pri)) {
				/* update run queue status */
				if (thread->sched_pri != IDLEPRI)
					clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
				rq->highq = MAXPRI - ffsbit(rq->bitmap);
			}

			thread->runq = PROCESSOR_NULL;
		}
		else {
			/*
			 *	The thread left the run queue before we could
			 *	lock the run queue.
			 */
			assert(thread->runq == PROCESSOR_NULL);
			processor = PROCESSOR_NULL;
		}

		simple_unlock(rqlock);
	}

	return (processor != PROCESSOR_NULL);
}
/*
 *	choose_thread:
 *
 *	Choose a thread to execute from the run queues
 *	and return it.  May steal a thread from another
 *	processor.
 *
 *	Called with pset scheduling lock and rt lock held,
 *	released on return.
 */
static thread_t
choose_thread(
	processor_t		processor)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			thread;

	if (processor->runq.count > 0 && processor->runq.highq >= rt_runq.highq) {
		simple_unlock(&rt_lock);

		pset_hint_low(pset, processor);

		if (pset->high_hint != PROCESSOR_NULL) {
			if (processor != pset->high_hint) {
				if (processor->runq.count >= pset->high_hint->runq.count)
					pset->high_hint = processor;
				else
				if (pset->high_hint->runq.highq > processor->runq.highq) {
					thread = steal_thread(pset->high_hint);
					if (thread != THREAD_NULL) {
						processor->deadline = UINT64_MAX;
						pset_unlock(pset);

						return (thread);
					}
				}
			}
		}
		else
			pset->high_hint = processor;

		thread = run_queue_dequeue(&processor->runq, SCHED_HEADQ);

		processor->deadline = UINT64_MAX;
		pset_unlock(pset);

		return (thread);
	}

	thread = run_queue_dequeue(&rt_runq, SCHED_HEADQ);
	simple_unlock(&rt_lock);

	processor->deadline = thread->realtime.deadline;
	pset_unlock(pset);

	return (thread);
}
/*
 *	steal_thread:
 *
 *	Steal a thread from a processor and return it.
 *
 *	Associated pset must be locked.  Returns THREAD_NULL
 *	on failure.
 */
static thread_t
steal_thread(
	processor_t		processor)
{
	run_queue_t		rq = &processor->runq;
	queue_t			queue = rq->queues + rq->highq;
	int				pri = rq->highq, count = rq->count;
	thread_t		thread = THREAD_NULL;

	while (count > 0) {
		thread = (thread_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor != processor) {
				remqueue(queue, (queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				rq->count--;
				if (testbit(pri, sched_preempt_pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}

				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}
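/*
 *	The steal scan mirrors run_queue_dequeue bookkeeping: count, urgency
 *	and the priority bitmap are adjusted here because the victim's queue
 *	is manipulated directly.  Threads bound to the victim processor are
 *	skipped, and the scan walks downward from highq one priority level
 *	at a time (queue--; pri--).
 */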
/*
 *	This is the processor idle loop, which just looks for other threads
 *	to execute.  Processor idle threads invoke this without supplying a
 *	current thread to idle without an asserted wait state.
 *
 *	Returns the next thread to execute if dispatched directly.
 */
static thread_t
processor_idle(
	thread_t			thread,
	processor_t			processor)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			new_thread;
	int					state;

	(void)splsched();

	pmsDown();						/* Step power down */

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START, (int)thread, 0, 0, 0, 0);

	timer_switch(&PROCESSOR_DATA(processor, system_state),
					mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);

	while (processor->next_thread == THREAD_NULL && processor->runq.count == 0 &&
			(thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) {
		volatile processor_t	hint;

		machine_idle();

		(void)splsched();

		if (pset->low_hint == PROCESSOR_NULL)
			break;

		hint = pset->high_hint;
		if (hint != PROCESSOR_NULL && hint->runq.count > 0)
			break;
	}

	timer_switch(&PROCESSOR_DATA(processor, idle_state),
					mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

	pset_lock(pset);

	pmsStep(0);						/* Step up out of idle power */

	state = processor->state;
	if (state == PROCESSOR_DISPATCHING) {
		/*
		 *	Common case -- cpu dispatched.
		 */
		new_thread = processor->next_thread;
		processor->next_thread = THREAD_NULL;
		processor->state = PROCESSOR_RUNNING;

		if (	processor->runq.highq > new_thread->sched_pri					||
				(rt_runq.highq > 0 && rt_runq.highq >= new_thread->sched_pri)	) {
			processor->deadline = UINT64_MAX;

			pset_unlock(pset);

			thread_lock(new_thread);
			thread_setrun(new_thread, SCHED_HEADQ);
			thread_unlock(new_thread);

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);

			return (THREAD_NULL);
		}

		pset_unlock(pset);

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, (int)new_thread, 0, 0);

		return (new_thread);
	}
	else
	if (state == PROCESSOR_IDLE) {
		remqueue(&pset->idle_queue, (queue_entry_t)processor);

		processor->state = PROCESSOR_RUNNING;
		enqueue_head(&pset->active_queue, (queue_entry_t)processor);
	}
	else
	if (state == PROCESSOR_SHUTDOWN) {
		/*
		 *	Going off-line.  Force a
		 *	reschedule.
		 */
		if ((new_thread = processor->next_thread) != THREAD_NULL) {
			processor->next_thread = THREAD_NULL;
			processor->deadline = UINT64_MAX;

			pset_unlock(pset);

			thread_lock(new_thread);
			thread_setrun(new_thread, SCHED_HEADQ);
			thread_unlock(new_thread);

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);

			return (THREAD_NULL);
		}
	}

	pset_unlock(pset);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);

	return (THREAD_NULL);
}
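/*
 *	Exit paths from the idle loop, by processor state:
 *
 *	PROCESSOR_DISPATCHING -- a thread was handed off via next_thread;
 *	it is returned for direct execution unless higher-priority work
 *	(local or real-time) arrived meanwhile, in which case it is pushed
 *	back onto the run queues and THREAD_NULL is returned.
 *
 *	PROCESSOR_IDLE -- no hand-off; the processor moves from the idle
 *	queue to the active queue and the caller re-enters the scheduler.
 *
 *	PROCESSOR_SHUTDOWN -- the processor is going off-line; any pending
 *	hand-off is requeued rather than run here.
 */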
void
idle_thread(void)
{
	processor_t		processor = current_processor();
	thread_t		new_thread;

	new_thread = processor_idle(THREAD_NULL, processor);
	if (new_thread != THREAD_NULL) {
		thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
		/*NOTREACHED*/
	}

	thread_block((thread_continue_t)idle_thread);
	/*NOTREACHED*/
}
kern_return_t
idle_thread_create(
	processor_t		processor)
{
	kern_return_t	result;
	thread_t		thread;
	spl_t			s;

	result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	s = splsched();
	thread_lock(thread);
	thread->bound_processor = processor;
	processor->idle_thread = thread;
	thread->sched_pri = thread->priority = IDLEPRI;
	thread->state = (TH_RUN | TH_IDLE);
	thread_unlock(thread);
	splx(s);

	thread_deallocate(thread);

	return (KERN_SUCCESS);
}
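/*
 *	The idle thread is bound to its processor and marked TH_RUN|TH_IDLE,
 *	so it never sits on a run queue; it simply runs the idle loop above.
 *	The thread_deallocate() releases the creation reference once
 *	processor->idle_thread holds the thread.
 */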
static uint64_t		sched_tick_deadline;

/*
 *	sched_startup:
 *
 *	Kicks off scheduler services.
 *
 *	Called at splsched.
 */
void
sched_startup(void)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_start_priority((thread_continue_t)sched_tick_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("sched_startup");

	thread_deallocate(thread);

	/*
	 *	Yield to the sched_tick_thread while it times
	 *	a series of context switches back.  It stores
	 *	the baseline value in sched_cswtime.
	 *
	 *	The current thread is the only other thread
	 *	active at this point.
	 */
	while (sched_cswtime == 0)
		thread_block(THREAD_CONTINUE_NULL);

	thread_daemon_init();

	thread_call_initialize();
}
/*
 *	sched_tick_thread:
 *
 *	Perform periodic bookkeeping functions about ten
 *	times per second.
 */
static void
sched_tick_continue(void)
{
	uint64_t			abstime = mach_absolute_time();

	sched_tick++;

	/*
	 *  Compute various averages.
	 */
	compute_averages();

	/*
	 *  Scan the run queues for threads which
	 *  may need to be updated.
	 */
	thread_update_scan();

	if (pm_tick_callout != NULL)
		(*pm_tick_callout)();

	clock_deadline_for_periodic_event(sched_tick_interval, abstime,
										&sched_tick_deadline);

	assert_wait_deadline((event_t)sched_tick_thread, THREAD_UNINT, sched_tick_deadline);
	thread_block((thread_continue_t)sched_tick_continue);
	/*NOTREACHED*/
}
/*
 *	Time a series of context switches to determine
 *	a baseline.  Toss the high and low and return
 *	the one-way value.
 */
static uint32_t
time_cswitch(void)
{
	uint32_t	new, hi, low, accum;
	uint64_t	abstime;
	int			i, tries = 7;

	accum = hi = low = 0;
	for (i = 0; i < tries; ++i) {
		abstime = mach_absolute_time();
		thread_block(THREAD_CONTINUE_NULL);

		new = mach_absolute_time() - abstime;

		if (i == 0)
			accum = hi = low = new;
		else {
			if (new > hi)
				hi = new;
			else
			if (new < low)
				low = new;

			accum += new;
		}
	}

	return ((accum - hi - low) / (2 * (tries - 2)));
}
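/*
 *	Arithmetic behind the return value: each sample measures a round
 *	trip through the only other runnable thread (the startup thread
 *	yielding in sched_startup), i.e. two context switches.  Dropping
 *	the high and low samples leaves (tries - 2) round trips, so
 *	dividing the trimmed sum by 2 * (tries - 2) yields an average
 *	one-way switch time.  With tries == 7, for example, five samples
 *	survive and the divisor is 10.
 */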
void
sched_tick_thread(void)
{
	sched_cswtime = time_cswitch();

	sched_tick_deadline = mach_absolute_time();

	sched_tick_continue();
	/*NOTREACHED*/
}
/*
 *	thread_update_scan / runq_scan:
 *
 *	Scan the run queues to account for timesharing threads
 *	which need to be updated.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	threads away in an array, pass two does the update.
 *
 *	This is necessary because the run queue is locked for
 *	the candidate scan, but the thread is locked for the update.
 *
 *	Array should be sized to make forward progress, without
 *	disabling preemption for long periods.
 */
#define	THREAD_UPDATE_SIZE		128

static thread_t		thread_update_array[THREAD_UPDATE_SIZE];
static int			thread_update_count = 0;

/*
 *	Scan a runq for candidate threads.
 *
 *	Returns TRUE if retry is needed.
 */
static boolean_t
runq_scan(
	run_queue_t		runq)
{
	register int			count;
	register queue_t		q;
	register thread_t		thread;

	if ((count = runq->count) > 0) {
		q = runq->queues + runq->highq;
		while (count > 0) {
			queue_iterate(q, thread, thread_t, links) {
				if (	thread->sched_stamp != sched_tick			&&
						(thread->sched_mode & TH_MODE_TIMESHARE)	) {
					if (thread_update_count == THREAD_UPDATE_SIZE)
						return (TRUE);

					thread_update_array[thread_update_count++] = thread;
					thread_reference_internal(thread);
				}

				count--;
			}

			q--;
		}
	}

	return (FALSE);
}
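/*
 *	Pass one above runs with the run queue locked, so it only records
 *	candidates; the reference taken via thread_reference_internal()
 *	keeps each candidate alive until pass two (thread_update_scan below)
 *	can lock the thread and call update_priority() outside the
 *	run-queue lock.
 */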
static void
thread_update_scan(void)
{
	boolean_t			restart_needed = FALSE;
	processor_t			processor = processor_list;
	processor_set_t		pset;
	thread_t			thread;
	spl_t				s;

	do {
		do {
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(&processor->runq);

			pset_unlock(pset);
			splx(s);

			if (restart_needed)
				break;

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_count == THREAD_UPDATE_SIZE) {
					restart_needed = TRUE;
					break;
				}

				thread_update_array[thread_update_count++] = thread;
				thread_reference_internal(thread);
			}
		} while ((processor = processor->processor_list) != NULL);

		/*
		 *	Ok, we now have a collection of candidates -- fix them.
		 */
		while (thread_update_count > 0) {
			thread = thread_update_array[--thread_update_count];
			thread_update_array[thread_update_count] = THREAD_NULL;

			s = splsched();
			thread_lock(thread);
			if (	!(thread->state & (TH_WAIT|TH_SUSP))	&&
					thread->sched_stamp != sched_tick		)
				update_priority(thread);
			thread_unlock(thread);
			splx(s);

			thread_deallocate(thread);
		}
	} while (restart_needed);
}
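/*
 *	When either pass fills thread_update_array, restart_needed causes
 *	the scan to resume after the update pass drains the array.  The
 *	bounded array keeps the time spent at splsched() with a run queue
 *	locked short, at the cost of occasionally rescanning.
 */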
/*
 *	Just in case someone doesn't use the macro
 */
#undef	thread_wakeup
void
thread_wakeup(
	event_t		x);

void
thread_wakeup(
	event_t		x)
{
	thread_wakeup_with_result(x, THREAD_AWAKENED);
}

boolean_t
preemption_enabled(void)
{
	return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
}
boolean_t
thread_runnable(
	thread_t	thread)
{
	return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
}
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf		kdbprintf
void			db_sched(void);

void
db_sched(void)
{
	iprintf("Scheduling Statistics:\n");
	db_indent += 2;
	iprintf("Thread invocations:  csw %d same %d\n",
		c_thread_invoke_csw, c_thread_invoke_same);
#if	MACH_COUNTERS
	iprintf("Thread block:  calls %d\n",
		c_thread_block_calls);
	iprintf("Idle thread:\n\thandoff %d block %d\n",
		c_idle_thread_handoff,
		c_idle_thread_block);
	iprintf("Sched thread blocks:  %d\n", c_sched_thread_block);
#endif	/* MACH_COUNTERS */
	db_indent -= 2;
}

#include <ddb/db_output.h>
void		db_show_thread_log(void);

void
db_show_thread_log(void)
{
}
#endif	/* MACH_KDB */